diff --git a/crates/core_arch/src/aarch64/mod.rs b/crates/core_arch/src/aarch64/mod.rs index d7295659c3..0292be2e0d 100644 --- a/crates/core_arch/src/aarch64/mod.rs +++ b/crates/core_arch/src/aarch64/mod.rs @@ -25,6 +25,20 @@ mod neon; #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] +mod sve; +#[cfg(any(target_arch = "aarch64", doc))] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve::*; + +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] +mod sve2; +#[cfg(any(target_arch = "aarch64", doc))] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve2::*; + mod prefetch; #[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub use self::prefetch::*; diff --git a/crates/core_arch/src/aarch64/sve/generated.rs b/crates/core_arch/src/aarch64/sve/generated.rs new file mode 100644 index 0000000000..d3790a7b5a --- /dev/null +++ b/crates/core_arch/src/aarch64/sve/generated.rs @@ -0,0 +1,45020 @@ +// This code is automatically generated. DO NOT MODIFY. 
+// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; +use crate::core_arch::arch::aarch64::*; + +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, 
svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn 
svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + 
svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) +} +#[doc = "Absolute 
difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, 
op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t 
{ + svabd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + 
svabd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn 
svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabs_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(inactive, pg, op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> 
svint16_t { + svabs_s16_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> 
svbool4_t; + } + unsafe { _svacge_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacge_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: 
svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacgt_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacgt_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> 
svfloat32_t; + } + unsafe { _svadd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> 
svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn 
svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) 
-> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> 
svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")] + fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")] + fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")] + fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(pg, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")] + fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")] + fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_s32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")] + fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { 
_svaddv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")] + fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")] + fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute 
vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")] + fn _svadrd_u32base_s32index(bases: svint32_t, 
indices: svint32_t) -> svint32_t; + } + unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_u32index(bases: 
svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { 
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 
32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nxv16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svand_b_z(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.and.nxv16i8")] + fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svand_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svand_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svand_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> 
svint32_t { + svand_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svand_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> 
svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: 
svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise 
AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")] + fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svandv_s8(pg, op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")] + fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { 
_svandv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")] + fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svandv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")] + fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svandv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u16(pg: 
svbool_t, op: svuint16_t) -> u16 { + unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")] + fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_m(pg, 
op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")] + fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svasr_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn 
svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")] + fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svasr_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_x(pg: svbool_t, op1: 
svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")] + fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svasr_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv16i8" + )] + fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv8i16" + )] + fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svasr_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + 
svasr_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv4i32" + )] + fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svasr_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> 
svint32_t {
    svasr_wide_s32_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    svasr_wide_s32_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
// NOTE(review): the `<const IMM2: i32>` parameter lists and `::<IMM2>` turbofish
// arguments were stripped from this chunk; restored per the surviving
// `static_assert_range!(IMM2, ...)` / `assert_instr(..., IMM2 = 1)` usage.
pub fn svasrd_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")]
        fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svasrd_n_s8_m(pg, op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    svasrd_n_s8_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    svasrd_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")]
        fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svasrd_n_s16_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    svasrd_n_s16_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    svasrd_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")]
        fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svasrd_n_s32_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    svasrd_n_s32_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    svasrd_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
// NOTE(review): restored the stripped `<const IMM2: i32>` parameter lists and
// `::<IMM2>` turbofish arguments (evidenced by `static_assert_range!(IMM2, 1..=64)`
// and `assert_instr(asrd, IMM2 = 1)` above).
pub fn svasrd_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    static_assert_range!(IMM2, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")]
        fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svasrd_n_s64_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    svasrd_n_s64_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    svasrd_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.bic.z.nxv16i1")]
        // NOTE(review): link_name fixed from "nvx16i1" to "nxv16i1" — LLVM's
        // scalable-vector mangling is `nxv<N><ty>` (see brka.z.nxv16i1 /
        // brkb.z.nxv16i1 below); the misspelled symbol would fail to resolve.
        // The real fix belongs in the stdarch-gen-arm spec this file is
        // generated from.
        fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbic_b_z(pg, op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")]
        fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svbic_s8_m(pg, op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svbic_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svbic_s8_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svbic_s8_x(pg, op1, 
svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: 
svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbic_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = 
"Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbic_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] 
+pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise clear"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] 
+pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Break after first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brka))] +pub fn 
svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")]
        fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrka_b_m(inactive, pg, op) }
}
#[doc = "Break after first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brka))]
pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")]
        fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrka_b_z(pg, op) }
}
#[doc = "Break before first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkb))]
pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")]
        fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkb_b_m(inactive, pg, op) }
}
#[doc = "Break before first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkb))]
pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")]
        fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkb_b_z(pg, op) }
}
#[doc = "Propagate break to next partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkn))]
pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")]
        fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkn_b_z(pg, op1, op2) }
}
#[doc = "Break after first true condition, propagating from previous partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkpa))]
pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1"
        )]
        fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkpa_b_z(pg, op1, op2) }
}
#[doc = "Break before first true condition, propagating from previous partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkpb))]
pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1"
        )]
        fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkpb_b_z(pg, op1, op2) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
// NOTE(review): the rotation immediate is a const generic (only 90 or 270 are
// valid, enforced by the static_assert below); the `<const IMM_ROTATION: i32>`
// parameter list had been stripped from these six svcadd functions.
pub fn svcadd_f32_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")]
        fn _svcadd_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            imm_rotation: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svcadd_f32_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f32_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f32_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")]
        fn _svcadd_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            imm_rotation: i32,
        ) -> svfloat64_t;
    }
    unsafe { _svcadd_f64_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")]
        fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svclasta_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")]
        fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svclasta_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")]
        fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
    }
    unsafe { _svclasta_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")]
        fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
    }
    unsafe { _svclasta_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")]
        fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
    }
    unsafe { _svclasta_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")]
        fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
    }
    unsafe { _svclasta_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
    unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
    unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
    unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
    unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv4f32"
        )]
        fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
    }
    unsafe { _svclasta_n_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv2f64"
        )]
        fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
    }
    unsafe { _svclasta_n_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv16i8"
        )]
        fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
    }
    unsafe { _svclasta_n_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv8i16"
        )]
        fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
    }
    unsafe { _svclasta_n_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv4i32"
        )]
        fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
    }
    unsafe { _svclasta_n_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv2i64"
        )]
        fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
    }
    unsafe { _svclasta_n_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
    unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
    unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
    unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
    unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")]
        fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svclastb_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")]
        fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svclastb_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")]
        fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
    }
    unsafe { _svclastb_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")]
        fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
    }
    unsafe { _svclastb_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")]
        fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
    }
    unsafe { _svclastb_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")]
        fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
    }
    unsafe { _svclastb_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
    unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
    unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
    unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
    unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4f32"
        )]
        fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
    }
    unsafe { _svclastb_n_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2f64"
        )]
        fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
    }
    unsafe { _svclastb_n_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv16i8"
        )]
        fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
    }
    unsafe { _svclastb_n_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv8i16"
        )]
        fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
    }
    unsafe { _svclastb_n_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4i32"
        )]
        fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
    }
    unsafe { _svclastb_n_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2i64"
        )]
        fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
    }
    unsafe { _svclastb_n_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
    unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
    unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
    unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
    unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")]
        fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe { svcls_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    svcls_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")]
        fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svcls_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe { svcls_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    svcls_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")]
        fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svcls_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe { svcls_s32_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
    svcls_s32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")]
        fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svcls_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe { svcls_s64_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
    svcls_s64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")]
        fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe { svclz_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    svclz_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")]
        fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svclz_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe { svclz_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    svclz_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")]
        fn 
_svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svclz_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svclz_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] 
+pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svclz_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svclz_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_m( 
+ pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] + fn _svcmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_f32_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: 
svfloat64_t, +) -> svfloat64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] + fn _svcmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcmla_f64_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 
0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" + )] + fn _svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] + fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpeq_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] + fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpeq_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn 
svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpeq_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpeq_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpeq_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), 
op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8" + )] + fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpeq_wide_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16" + )] + fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpeq_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32" + )] + fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpeq_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] + fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpge_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpge_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, 
op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater 
than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")] + fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cmphs.nxv8i16")] + fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")] + fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")] + fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8" + )] + fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + 
svcmpge_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16" + )] + fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpge_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32" + )] + fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpge_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8" + )] + fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpge_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( 
+ target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16" + )] + fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpge_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32" + )] + fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpge_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpgt_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpgt_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_s64(pg.sve_into(), op1, 
op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")] + fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cmphi.nxv8i16")] + fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")] + fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")] + fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8" + )] + fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16" + )] + fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32" + )] + fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8" + )] + fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16" + )] + fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { 
_svcmpgt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32" + )] + fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fcmge))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpge_f32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpge_f64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpge_s8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpge_s16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpge_s32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn 
svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpge_s64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpge_u8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpge_u16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpge_u32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpge_u64(pg, 
op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8" + )] + fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmple_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16" + )] + fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmple_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32" + )] + fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmple_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8" + )] + fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmple_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16" + )] + fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmple_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32" + )] + fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmple_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, 
op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less 
than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8" + )] + fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmplt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16" + )] + fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmplt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32" + )] + fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmplt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8" + )] + fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), 
op2.as_signed()) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmplt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16" + )] + fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmplt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> 
svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32" + )] + fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmplt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpne_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")]
        fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    // 2-lane predicate: convert the 16-lane svbool_t in and the result back out.
    unsafe { _svcmpne_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Scalar form: splat the scalar and delegate to the vector form.
    svcmpne_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")]
        fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // 16-lane predicate matches svbool_t directly; no conversion needed.
    unsafe { _svcmpne_s8(pg, op1, op2) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmpne_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")]
        fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpne_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmpne_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")]
        fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpne_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    svcmpne_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")]
        fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpne_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmpne_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Bitwise inequality is sign-agnostic: reuse the signed implementation.
    unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmpne_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmpne_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    svcmpne_u32(pg, op1, svdup_n_u32(op2))
}
+#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8" + )] + fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpne_wide_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpne_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = 
"Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16" + )] + fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpne_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpne_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32" + )] + fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpne_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpne_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")] + fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpuo_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpuo_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")] + fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { 
_svcmpuo_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpuo_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Logically invert 
boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count 
nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")] + fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcnt_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcnt_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcnt_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cnt.nxv2f64")] + fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcnt_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcnt_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcnt_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")] + fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(cnt))] +pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcnt_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")] + fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnt_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> 
svuint16_t { + svcnt_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")] + fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnt_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcnt_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")] + fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnt_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcnt_s64_m(svdup_n_u64(0), pg, op) +} +/* NOTE(review): auto-generated file — change crates/stdarch-gen-arm/spec instead of editing here. The unsigned svcnt_* variants below are thin wrappers: they reinterpret operands with as_signed()/as_unsigned() and delegate to the signed implementations. */ #[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> 
svuint32_t { + unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svcntb() -> u64 { + svcntb_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> u64 { + svcnth_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svcntw() -> u64 { + svcntw_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> u64 { + svcntd_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +# [cfg_attr (test , assert_instr (rdvl , PATTERN = { svpattern :: SV_ALL }))] +# [cfg_attr (test , assert_instr (cntb , PATTERN = { svpattern :: SV_MUL4 }))] +/* NOTE(review): restored the missing `<const PATTERN: svpattern>` const generic on the four svcnt{b,h,w,d}_pat declarations — each body references PATTERN and every caller above passes it via turbofish (e.g. svcntb_pat::<{ svpattern::SV_ALL }>()), so the parameterless declarations could not compile. This fix must also be mirrored in crates/stdarch-gen-arm/spec, since this file is regenerated. */ pub fn svcntb_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntb_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cnth , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcnth_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth_pat(pattern: svpattern) -> i64; + } + unsafe { _svcnth_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntw , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcntw_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")] + fn _svcntw_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntw_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntd , PATTERN = { svpattern :: SV_ALL 
}))] +pub fn svcntd_pat<const PATTERN: svpattern>() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntd_pat(PATTERN).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64; + } + unsafe { _svcntp_b8(pg, op).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64; + } + unsafe { _svcntp_b16(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64; + } + unsafe { 
_svcntp_b32(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64; + } + unsafe { _svcntp_b64(pg.sve_into(), op.sve_into()).as_unsigned() } +} +/* NOTE(review): generated code — fix in crates/stdarch-gen-arm/spec. svcompact_* lower straight to the llvm.aarch64.sve.compact.* intrinsics; the all-widths svbool_t predicate is narrowed with sve_into(), and the unsigned variants reuse the signed ones via as_signed()/as_unsigned(). */ #[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4f32" + )] + fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2f64" + )] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { 
_svcompact_f64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4i32" + )] + fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2i64" + )] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s16(x0: svint16_t, x1: 
svint16_t) -> svint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +/* NOTE(review): generated code — fix in crates/stdarch-gen-arm/spec. The svcreateN_* functions build vector tuples through the compiler's scalable sve_tuple_createN hooks; note no assert_instr is attached to them, unlike the predicated intrinsics elsewhere in this file. */ #[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { + unsafe { 
crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, +) -> svfloat32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, +) -> svfloat64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: 
svint64_t, x3: svint64_t) -> svint64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u16( + x0: svuint16_t, + x1: svuint16_t, + x2: svuint16_t, + x3: svuint16_t, +) -> svuint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u32( + x0: svuint32_t, + x1: svuint32_t, + x2: svuint32_t, + x3: svuint32_t, +) -> svuint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u64( + x0: svuint64_t, + x1: svuint64_t, + x2: svuint64_t, + x3: svuint64_t, +) -> svuint64x4_t { + unsafe { 
crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +/* NOTE(review): generated code — fix in crates/stdarch-gen-arm/spec. Pattern for the conversion intrinsics below: the _m form is the FFI entry point; the _x form reuses _m with the inactive-lane argument produced by transmute_unchecked(op); the _z form reuses _m merging into a zero vector from svdup_n_*(0.0). Unsigned-source forms pass op.as_signed() to the same LLVM intrinsic signature. */ #[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")] + fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvt_f32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_m(inactive: svfloat64_t, 
pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")] + fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvt_f64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = 
"Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = 
"Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + 
svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")] + fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_s32_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svcvt_s32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svcvt_s32_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> 
svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")] + fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_s32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe { svcvt_s32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { + svcvt_s32_f64_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")] + fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_s64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe { svcvt_s64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { + svcvt_s64_f32_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")] + fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_s64_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svcvt_s64_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svcvt_s64_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")] + fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcvt_u32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcvt_u32_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")] + fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe { svcvt_u32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + svcvt_u32_f64_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")] + fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe { svcvt_u64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + svcvt_u64_f32_m(svdup_n_u64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")] + fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcvt_u64_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcvt_u64_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")] + fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdiv_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_m(pg: svbool_t, op1: 
svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")] + fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdiv_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) 
+} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")] + fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")] + fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")] + fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_m(pg: svbool_t, op1: 
svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")] + fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdivr_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + 
svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")] + fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdivr_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, op1, 
op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")] + fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sdivr))] +pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")] + fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: 
i64) -> svint64_t { + svdivr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")] + fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> 
svuint32_t { + svdivr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")] + fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32" + )] + fn _svdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64" + )] + fn 
_svdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u32( + op1: svuint32_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv4i32" + )] + fn _svdot_lane_u32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { + _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u64( + op1: svuint64_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv2i64" + )] + fn _svdot_lane_u64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { + _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")] + fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_s32(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t { + svdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")] + fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_s64(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t { + svdot_s64(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")] + fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t { + svdot_u32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")] + fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(udot))] +pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { + svdot_u64(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { + svtbl_f32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + svtbl_f64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { + svtbl_s8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { + svtbl_s16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { + svtbl_s32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { + svtbl_s64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { + svtbl_u8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { + svtbl_u16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { + svtbl_u32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + svtbl_u64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b8(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")] + fn _svdup_n_b8(op: bool) -> svbool_t; + } + unsafe { _svdup_n_b8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b16(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")] + fn _svdup_n_b16(op: bool) -> svbool8_t; + } + unsafe { _svdup_n_b16(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b32(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")] + fn _svdup_n_b32(op: bool) -> svbool4_t; + } + unsafe { _svdup_n_b32(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b64(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")] + fn _svdup_n_b64(op: bool) -> svbool2_t; + } + unsafe { _svdup_n_b64(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")] + fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")] + fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")] + fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8_m(inactive, pg, op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")] + fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")] + fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")] + fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { + unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_z(pg: 
svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t { + unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t { + unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t { + unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn 
svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32" + )] + fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t; + } + unsafe { _svdupq_lane_f32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64" + )] + fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t; + } + unsafe { _svdupq_lane_f64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv16i8" + )] + fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t; + } + unsafe { 
_svdupq_lane_s8(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16" + )] + fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t; + } + unsafe { _svdupq_lane_s16(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32" + )] + fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t; + } + unsafe { _svdupq_lane_s32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64" + )] + fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t; + } + unsafe { _svdupq_lane_s64(data, index.as_signed()) } +} +#[doc = "Broadcast a 
quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { + unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { + unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { + unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b16( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, +) -> svbool_t { + let op1 = svdupq_n_s16( + x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16, + ); + svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { + let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32); + svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { + let op1 = svdupq_n_s64(x0 as i64, x1 as i64); + svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b8( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, + x8: bool, + x9: bool, + x10: bool, + x11: bool, + x12: bool, + x13: bool, + x14: bool, + x15: bool, +) -> svbool_t { + let op1 = svdupq_n_s8( + x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8, + x9 as i8, x10 
as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8, + ); + svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + )] + fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; + } + unsafe { + let op = _svdupq_n_f32(svundef_f32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_f32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + )] + fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; + } + unsafe { + let op = _svdupq_n_s32(svundef_s32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_s32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t { + unsafe { + svdupq_n_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() 
+ } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + )] + fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; + } + unsafe { + let op = _svdupq_n_f64(svundef_f64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_f64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + )] + fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; + } + unsafe { + let op = _svdupq_n_s64(svundef_s64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_s64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { + unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s16( + x0: i16, + x1: i16, + x2: i16, + x3: i16, + x4: i16, + x5: i16, + x6: i16, + x7: i16, +) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + )] + fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; + } + unsafe { + let op = _svdupq_n_s16( + svundef_s16(), + crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]), + 0, + ); + svdupq_lane_s16(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u16( + x0: u16, + x1: u16, + x2: u16, + x3: u16, + x4: u16, + x5: u16, + x6: u16, + x7: u16, +) -> svuint16_t { + unsafe { + svdupq_n_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s8( + x0: i8, + x1: i8, + x2: i8, + x3: i8, + x4: i8, + x5: i8, + x6: i8, + x7: i8, + x8: i8, + x9: i8, + x10: i8, + x11: i8, + x12: i8, + x13: i8, + x14: i8, + x15: i8, +) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8" + )] + fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; + } + unsafe { + let op = _svdupq_n_s8( + svundef_s8(), + crate::mem::transmute([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, 
x11, x12, x13, x14, x15,
+            ]),
+            0,
+        );
+        svdupq_lane_s8(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+pub fn svdupq_n_u8(
+    x0: u8,
+    x1: u8,
+    x2: u8,
+    x3: u8,
+    x4: u8,
+    x5: u8,
+    x6: u8,
+    x7: u8,
+    x8: u8,
+    x9: u8,
+    x10: u8,
+    x11: u8,
+    x12: u8,
+    x13: u8,
+    x14: u8,
+    x15: u8,
+) -> svuint8_t {
+    unsafe {
+        svdupq_n_s8(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+            x4.as_signed(),
+            x5.as_signed(),
+            x6.as_signed(),
+            x7.as_signed(),
+            x8.as_signed(),
+            x9.as_signed(),
+            x10.as_signed(),
+            x11.as_signed(),
+            x12.as_signed(),
+            x13.as_signed(),
+            x14.as_signed(),
+            x15.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "unadjusted" {
+        // FIX: intrinsic name was misspelled "nvx16i1" — LLVM's scalable-vector
+        // type mangling is "nxv<N><ty>" (matches nxv16i8/nxv8i16/... elsewhere
+        // in this file); the misspelled symbol would never resolve.
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nxv16i1")]
+        fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _sveor_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name =
"llvm.aarch64.sve.eor.nxv16i8")] + fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveor_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")] + fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveor_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")] + fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveor_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn 
sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")] + fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveor_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
sveor_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveor_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")] + fn _sveorv_s8(pg: 
svbool_t, op: svint8_t) -> i8; + } + unsafe { _sveorv_s8(pg, op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")] + fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _sveorv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")] + fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _sveorv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")] + fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _sveorv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Floating-point exponential accelerator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t {
+    unsafe extern "unadjusted" {
+        // FIX: removed stray trailing space inside the link_name string —
+        // "...nxv4f32 " is not the same symbol as "...nxv4f32" and would fail
+        // to resolve against the LLVM intrinsic.
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32"
+        )]
+        fn _svexpa_f32(op: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svexpa_f32(op.as_signed()) }
+}
+#[doc = "Floating-point exponential accelerator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
+    unsafe extern "unadjusted" {
+        // FIX: removed stray trailing space inside the link_name string.
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64"
+        )]
+        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svexpa_f64(op.as_signed()) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0..=63);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
+        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svext_f32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0..=31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
+        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svext_f64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0..=255);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
+        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svext_s8(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0..=127);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
+        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svext_s16(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0..=63);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
+        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svext_s32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0..=31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
+        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svext_s64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0..=255);
+    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0..=127);
+    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0..=63);
+    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0..=31);
+    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
+        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svextb_s16_m(inactive, pg.sve_into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_x(pg: svbool_t, op:
svint16_t) -> svint16_t {
    svextb_s16_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    svextb_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")]
        fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svextb_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svextb_s32_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svextb_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")]
        fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svexth_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svexth_s32_m(op, pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svexth_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")]
        fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextb_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextb_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextb_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")]
        fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svexth_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svexth_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svexth_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")]
        fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextw_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextw_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextw_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Zero-extend the low 8
bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")] + fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_u16_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uxtb.nxv4i32")] + fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")] + fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")] + fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")] + fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uxtw))]
pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")]
        fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextw_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtw))]
pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextw_u64_m(op, pg, op)
}
#[doc = "Zero-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtw))]
pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextw_u64_m(svdup_n_u64(0), pg, op)
}
// NOTE(review): the tuple index is a const generic (`IMM_INDEX`) so the range
// check happens at compile time; `svget2` accepts indices 0..=1.
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe {
crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
// NOTE(review): the tuple index is a const generic (`IMM_INDEX`), range-checked
// at compile time — 0..=2 for three-vector tuples, 0..=3 for four-vector tuples.
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Create linear series"]
#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s8(base: i8, step: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")] + fn _svindex_s8(base: i8, step: i8) -> svint8_t; + } + unsafe { _svindex_s8(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s16(base: i16, step: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")] + fn _svindex_s16(base: i16, step: i16) -> svint16_t; + } + unsafe { _svindex_s16(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s32(base: i32, step: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")] + fn _svindex_s32(base: i32, step: i32) -> svint32_t; + } + unsafe { _svindex_s32(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn 
svindex_s64(base: i64, step: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")] + fn _svindex_s64(base: i64, step: i64) -> svint64_t; + } + unsafe { _svindex_s64(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { + unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { + unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { + unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { + unsafe { svindex_s64(base.as_signed(), 
step.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")] + fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t; + } + unsafe { _svinsr_n_f32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")] + fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t; + } + unsafe { _svinsr_n_f64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")] + fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t; + } + unsafe { _svinsr_n_s8(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")] + fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t; + } + unsafe { _svinsr_n_s16(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")] + fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t; + } + unsafe { _svinsr_n_s32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")] + fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t; + } + unsafe { _svinsr_n_s64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg.sve_into(), op) } +} +#[doc = "Extract element after 
last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlasta_s16(pg, 
op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn 
_svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; 
+ } + _svld1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32" + )] + fn _svld1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32" + )] + fn _svld1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64" + )] + fn _svld1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64" + )] + fn _svld1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32" + )] + fn _svld1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn 
svld1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32" + )] + fn _svld1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + 
svld1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svld1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32" + )] + fn _svld1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32offset_f32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32" + )] + fn _svld1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32offset_s32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64" + )] + fn _svld1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64" + )] + fn _svld1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32" + )] + fn _svld1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32" + )] + fn _svld1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + 
svld1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"] +#[doc = "## 
Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in 
`bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack 
provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this 
is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svld1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svld1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = 
"Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svld1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svld1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1row))] +pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")] + fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1ro_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")] + fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1ro_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")] + fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1ro_s8(pg, base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")] + fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1ro_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")] + fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + 
_svld1ro_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")] + fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1ro_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1ro_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1ro_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1ro_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1ro_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and 
replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")] + fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1rq_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")] + fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1rq_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")] + fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1rq_s8(pg, base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")] + fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1rq_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")] + fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1rq_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")] + fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1rq_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1rq_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1rq_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1rq_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1rq_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn 
svld1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_u32( + pg: 
svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_s32( + pg: 
svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and 
sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} 
+#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data 
and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints 
must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svld1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svld1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svld1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svld1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svld1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svld1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_s32index_s32( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sw_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc 
= " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: 
svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svld1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + 
svld1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, 
+ ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_u64( + pg: svbool_t, 
+ base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: 
svuint32_t, +) -> svint32_t { + svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> 
svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> 
svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + 
crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn 
svld1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_s64(pg: 
svbool_t, bases: svuint64_t) -> svint64_t { + svld1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, 
bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svld1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svld1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svld1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svld1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svld1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svld1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svld1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + 
indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn 
svld1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svld1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn 
svld1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4f32" + )] + fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t; + } + _svld2_f32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2f64" + )] + fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t; + } + _svld2_f64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv16i8" + )] + fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t; + } + _svld2_s8(pg, base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv8i16" + )] + fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t; + } + _svld2_s16(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4i32" + )] + fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t; + } + _svld2_s32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2i64" + )] + fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t; + } + _svld2_s64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { + svld2_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_u16(pg: svbool_t, base: 
*const u16) -> svuint16x2_t { + svld2_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { + svld2_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { + svld2_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address 
for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t { + svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t { + svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t { + svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t { + svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t { + svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t { + svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t { + svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t { + svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t { + svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x2_t { + svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4f32" + )] + fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t; + } + _svld3_f32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2f64" + )] + fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t; + } + _svld3_f64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ld3b))] +pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv16i8" + )] + fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t; + } + _svld3_s8(pg, base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv8i16" + )] + fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t; + } + _svld3_s16(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch 
= "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4i32" + )] + fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t; + } + _svld3_s32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2i64" + )] + fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t; + } + _svld3_s64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { + svld3_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { + svld3_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { + svld3_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { + svld3_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t { + svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t { + svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t { + svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t { + svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe 
fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t { + svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t { + svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t { + svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t { + svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t { + svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> 
svuint64x3_t { + svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4f32" + )] + fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t; + } + _svld4_f32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2f64" + )] + fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t; + } + _svld4_f64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four 
vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv16i8" + )] + fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t; + } + _svld4_s8(pg, base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv8i16" + )] + fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t; + } + _svld4_s16(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4i32" + )] + fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t; + } + _svld4_s32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2i64" + )] + fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t; + } + _svld4_s64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { + svld4_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { + svld4_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { + svld4_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { + svld4_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t { + svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t { + svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t { + svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t { + svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t { + svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t { + svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t { + svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t { + svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t { + svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t { + svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")] + fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldff1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")] + fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldff1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")] + fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldff1_s8(pg, base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")] + fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldff1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")] + fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldff1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")] + fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldff1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldff1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldff1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldff1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldff1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32" + )] + fn _svldff1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32" + )] + fn _svldff1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64" + )] + fn _svldff1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64" + )] + fn _svldff1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32" + )] + fn _svldff1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32" + )] + fn _svldff1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldff1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32" + )] + fn _svldff1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32offset_f32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32" + )] + fn _svldff1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32offset_s32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64" + )] + fn _svldff1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64" + )] + fn _svldff1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32" + )] + fn _svldff1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32" + )] + fn _svldff1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldff1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldff1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldff1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldff1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldff1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldff1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldff1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldff1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldff1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldff1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldff1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_s32index_s32( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sw_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldff1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldff1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldff1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
    svldff1ub_s32(pg, base).as_unsigned()
}
// The unsigned (`_u*`) first-faulting loads below delegate to the corresponding signed
// implementation and reinterpret the result lanes with `as_unsigned()`; the memory access
// performed is identical, only the lane interpretation differs.
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
    svldff1uh_s32(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
    svldff1ub_s64(pg, base).as_unsigned()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
    svldff1uh_s64(pg, base).as_unsigned()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
    svldff1uw_s64(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
// `_vnum_` variants advance `base` by `vnum` whole vectors before loading. The
// svcnth()/svcntw()/svcntd() calls supply the VL-dependent scaling described in the
// Safety docs above (counts chosen per destination element width: 16/32/64-bit lanes).
pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
    svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
    svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
    svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
    svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
    svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
    svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
// Unsigned `_vnum_` variants: same per-vector pointer arithmetic as the signed forms,
// but delegating to the unsigned contiguous loads (svldff1ub_u*/svldff1uh_u*/svldff1uw_u*).
pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
    svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
    svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
    svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
    svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
    svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
    svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
// Signed-result gather: delegates to the unsigned-result twin and reinterprets lanes.
pub unsafe fn svldff1uh_gather_s32index_s32(
    pg: svbool_t,
    base: *const u16,
    indices: svint32_t,
) -> svint32_t {
    svldff1uh_gather_s32index_u32(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
// Direct binding to the LLVM SVE gather intrinsic via the "unadjusted" ABI; the
// predicate is converted to the lane-width-specific form with `sve_into()`.
// NOTE(review): `sve_cast::(` below appears to have lost its generic arguments in
// transit — verify against the generated file / generator output before relying on it.
pub unsafe fn svldff1uh_gather_s32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svint32_t,
) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16"
        )]
        fn _svldff1uh_gather_s32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    crate::intrinsics::simd::scalable::sve_cast::(
        _svldff1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64index_s64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svint64_t {
    svldff1uh_gather_s64index_u64(pg, base, indices).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64index_s64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svint64_t {
    svldff1uw_gather_s64index_u64(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
        )]
        fn _svldff1uh_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    crate::intrinsics::simd::scalable::sve_cast::(
        _svldff1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
        )]
        fn _svldff1uw_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    crate::intrinsics::simd::scalable::sve_cast::(
        _svldff1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
// Unsigned-index gathers: the u64-index variants reinterpret the indices as signed and
// delegate to the s64-index implementations; the u32-index variant binds its own LLVM
// intrinsic (uxtw addressing) below.
pub unsafe fn svldff1uh_gather_u32index_s32(
    pg: svbool_t,
    base: *const u16,
    indices: svuint32_t,
) -> svint32_t {
    svldff1uh_gather_u32index_u32(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svuint32_t,
) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16"
        )]
        fn _svldff1uh_gather_u32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    crate::intrinsics::simd::scalable::sve_cast::(
        _svldff1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64index_s64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svint64_t {
    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64index_s64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svint64_t {
    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svuint64_t {
    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svuint64_t {
    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
// The `index << 1` converts an element index into a byte offset for the 2-byte (16-bit)
// elements before delegating to the `_offset_` form. `unchecked_shl` means an index whose
// shift overflows is the caller's responsibility (covered by the Safety constraints above).
pub unsafe fn svldff1uh_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")] + fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnf1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")] + fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnf1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")] + fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnf1_s8(pg, base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")] + fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnf1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")] + fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnf1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")] + fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnf1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnf1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnf1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnf1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnf1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnf1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldnf1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldnf1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldnf1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldnf1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldnf1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldnf1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::scalable::sve_cast( + _svldnf1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnf1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnf1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnf1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnf1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnf1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldnf1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldnf1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldnf1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldnf1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldnf1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldnf1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")] + fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnt1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")] + fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnt1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")] + fn _svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnt1_s8(pg, base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")] + fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnt1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn 
svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")] + fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnt1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")] + fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnt1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may 
be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnt1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnt1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnt1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnt1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_f32(_op: svfloat32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_f64(_op: svfloat64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_s8(_op: svint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a 
full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_s16(_op: svint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_s32(_op: svint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_s64(_op: svint64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_u8(_op: svuint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_u16(_op: svuint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_u32(_op: svuint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_u64(_op: svuint64_t) -> u64 { + svcntd() +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")] + fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")] + fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: 
svint16_t) -> svint16_t; + } + unsafe { _svlsl_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")] + fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsl_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")] + fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsl_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(lsl))] +pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_z(pg: svbool_t, op1: 
svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_m(pg, 
op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8" + )] + fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16" + )] + fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsl_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32" + )] + fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsl_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + 
svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")] + fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_z(pg: 
svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")] + fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> 
svuint16_t { + svlsr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")] + fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t 
{ + svlsr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")] + fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8" + )] + fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16" + )] + fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsr_wide_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, 
svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32" + )] + fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsr_wide_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_x(pg: svbool_t, op1: 
svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")] + fn 
_svmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmad_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, 
+) -> svfloat32_t { + svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")] + fn _svmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmad_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub 
fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")] + fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmad_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(mad))] +pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")] + fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmad_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_m(pg: svbool_t, op1: 
svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")] + fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmad_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")] + fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmad_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub 
fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, svsel_u8(pg, op1, 
svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")] + fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmax_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")] + fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmax_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")] + fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_s8_m(pg, op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")] + fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svmax_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(smax))] +pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")] + fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_x(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")] + fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) 
-> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")] + fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")] + fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")] + fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, 
op2: u32) -> svuint32_t { + svmax_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")] + fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = 
"Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")] + fn _svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnm_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fmaxnm))] +pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")] + fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn 
svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32" + )] + fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxnmv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64" + )] + fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxnmv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")] + fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")] + fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")] + fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_s8(pg, op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")] + fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_s16(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")] + fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_s32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")] + fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_s64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")] + fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc 
= "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")] + fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")] + fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")] + fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")] + fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmin_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")] + fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmin_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_x(pg: 
svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")] + fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_s8_m(pg, op1, op2) } +} +#[doc = 
"Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> 
svint8_t { + svmin_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")] + fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")] + fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.smin.nxv2i64")] + fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")] + fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")] + fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_m(pg: svbool_t, 
op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")] + fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] 
+pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")] + fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, 
op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")] + fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnm_f32_m(pg.sve_into(), op1, op2) } +} 
+#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")] + fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv4f32" + )] + fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminnmv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fminnmv))] +pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv2f64" + )] + fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminnmv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")] + fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")] + fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")] 
+ fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_s8(pg, op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")] + fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_s16(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")] + fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_s32(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")] + fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_s64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")] + fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")] + fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")] + fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")] + fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")] + fn _svmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmla_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: 
svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.fmla.nxv2f64")] + fn _svmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmla_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: 
svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")] + fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmla_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(mla))] +pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.mla.nxv8i16")] + fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmla_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, svsel_s16(pg, op1, 
svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")] + fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmla_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: 
svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")] + fn 
_svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmla_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = 
"Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + 
svmla_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f32<const IMM_INDEX: i32>( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32" + )] + fn _svmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f64<const IMM_INDEX: i32>( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64" + )] + fn _svmla_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")] + fn _svmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + 
op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmls_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, svsel_f32(pg, op1, 
svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")] + fn _svmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmls_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(fmls))] +pub fn svmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, 
op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")] + fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmls_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, 
op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")] + fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmls_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")] + fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmls_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_z(pg: svbool_t, 
op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")] + fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmls_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: 
svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = 
"Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmls_u64_x(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // Zeroing form: inactive lanes of op1 are forced to 0 before the merging op.
    svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmls_u64_z(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
// FIX: the const-generic parameter list was missing, leaving IMM_INDEX
// undeclared while it is used in the range assert, the attribute, and the
// intrinsic call. Restored `<const IMM_INDEX: i32>`; valid lane indices for
// f32 are 0..=3 (four 32-bit lanes per 128-bit segment).
pub fn svmls_lane_f32<const IMM_INDEX: i32>(
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32"
        )]
        fn _svmls_lane_f32(
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
            IMM_INDEX: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
// FIX: same missing const-generic as svmls_lane_f32; valid lane indices for
// f64 are 0..=1 (two 64-bit lanes per 128-bit segment).
pub fn svmls_lane_f64<const IMM_INDEX: i32>(
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64"
        )]
        fn _svmls_lane_f64(
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
            IMM_INDEX: i32,
        ) -> svfloat64_t;
    }
    unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f32mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmmla))]
pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")]
        fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svmmla_f32(op1, op2, op3) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmmla))]
pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")]
        fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) ->
svfloat64_t; + } + unsafe { _svmmla_f64(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smmla))] +pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")] + fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_s32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ummla))] +pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ummla.nxv4i32")] + fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + svand_b_z(pg, op, op) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")] + fn _svmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmsb_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")] + fn _svmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmsb_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn 
svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, 
op3: f64) -> svfloat64_t { + svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")] + fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmsb_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn 
svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")] + fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmsb_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn 
svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")] + fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmsb_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")] + fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmsb_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_m(pg: 
svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_z(pg, op1, op2, 
svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline(always)] 
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // NOTE(review): generated code (header says DO NOT MODIFY); lasting edits
    // belong in `crates/stdarch-gen-arm/spec/`.
    // Binding for the LLVM SVE floating-point multiply on 4 x f32 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")]
        fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts the generic `svbool_t` predicate into the 4-lane
    // `svbool4_t` form that the nxv4f32 intrinsic signature expects.
    unsafe { _svmul_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // `_n_` form: splat the scalar `op2` across a vector, then defer to the
    // vector-by-vector variant.
    svmul_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // `_x` ("don't care") variant: implemented by simply reusing the merging
    // (`_m`) form, which is a valid choice for the unspecified inactive lanes.
    svmul_f32_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmul_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // `_z` variant: zero the inactive lanes of `op1` first (via `svsel`
    // against a zero splat), so inactive result lanes come out zero from the
    // merging multiply.
    svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmul_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Same template as the f32 group, for 2 x f64 vectors (nxv2f64) with the
    // matching 2-lane predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")]
        fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svmul_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmul_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn svmul_f64_x(pg:
svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = 
"Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> 
svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] 
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // NOTE(review): generated code (header says DO NOT MODIFY); lasting edits
    // belong in `crates/stdarch-gen-arm/spec/`.
    // `_x` ("don't care") variant: reuses the merging (`_m`) form, a valid
    // choice for the unspecified inactive lanes.
    svmul_s32_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // `_n_` form: splat the scalar `op2` and defer to the vector variant.
    svmul_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // `_z` variant: zero the inactive lanes of `op1` (via `svsel` against a
    // zero splat) so inactive result lanes come out zero.
    svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmul_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Binding for the LLVM SVE integer multiply on 2 x i64 vectors; the
    // generic `svbool_t` predicate is converted to the 2-lane `svbool2_t`
    // the nxv2i64 intrinsic signature expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")]
        fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svmul_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmul_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svmul_s64_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmul_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"]
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn 
svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: 
svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")] + fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_s8_m(pg, op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> 
svint8_t { + svmulh_s8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")] + fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svmulh_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")] + fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")] + fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: 
i64) -> svint64_t { + svmulh_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv16i8")] + fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t 
{ + svmulh_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")] + fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_m(pg: svbool_t, op1: 
svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")] + fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning 
high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")] + fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_m(pg, 
op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] +#[inline(always)] 
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")]
        fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // sve_into() converts the generic svbool_t predicate to the
    // element-width-specific svbool4_t layout this intrinsic is declared with.
    unsafe { _svmulx_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmulx_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svmulx_f32_m(pg, op1, op2)
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmulx_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmulx_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")]
        fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svmulx_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmulx_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svmulx_f64_m(pg, op1, op2)
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmulx_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmulx_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Bitwise NAND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nand))]
pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")]
        fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // The intrinsic is declared on nxv16i1, which is svbool_t's own layout,
    // so the predicate is passed through without conversion.
    unsafe { _svnand_b_z(pg, op1, op2) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")]
        fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svneg_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // _x form: `op` doubles as `inactive`, so unpredicated lanes keep their
    // input value.
    svneg_f32_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    svneg_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")]
        fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svneg_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    svneg_f64_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    svneg_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")]
        fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // 8-bit elements: nxv16i1 matches svbool_t directly, no sve_into() needed.
    unsafe { _svneg_s8_m(inactive, pg, op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    svneg_s8_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    svneg_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")]
        fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svneg_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    svneg_s16_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    svneg_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")]
        fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svneg_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svneg_s32_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svneg_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")]
        fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svneg_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svneg_s64_m(op, pg, op)
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svneg_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")]
        fn _svnmad_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    unsafe { _svnmad_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmad_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmad_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")]
        fn _svnmad_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // Predicate is narrowed to the svbool2_t layout matching nxv2f64 lanes.
    unsafe { _svnmad_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svnmad_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Zeroing form: op1's inactive lanes are forced to 0.0 before merging.
    svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")]
        fn _svnmla_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    unsafe { _svnmla_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmla_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")]
        fn _svnmla_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    unsafe { _svnmla_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svnmla_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")]
        fn _svnmls_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // Predicate converted to the svbool4_t layout matching nxv4f32 lanes.
    unsafe { _svnmls_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmls_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Zeroing form: op1's inactive lanes are forced to 0.0 before merging.
    svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")]
        fn _svnmls_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    unsafe { _svnmls_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // "Don't care" form reuses the merging implementation unchanged.
    svnmls_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")]
        fn _svnmsb_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    unsafe { _svnmsb_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmsb_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")]
        fn _svnmsb_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    unsafe { _svnmsb_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, multiplicand
first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Bitwise NOR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nor))] +pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")] + fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + sveor_b_z(pg, op, pg) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")] + fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { 
_svnot_s8_m(inactive, pg, op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")] + fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")] + fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")] + fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(not))] +pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Bitwise inclusive OR, inverting second argument"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orn))] +pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nxv16i1")] + fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorn_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nxv16i1")] + fn _svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorr_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { 
+ unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn 
svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")] + fn _svorv_s8(pg: svbool_t, 
op: svint8_t) -> i8; + } + unsafe { _svorv_s8(pg, op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")] + fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svorv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")] + fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svorv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")] + fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svorv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Set all predicate elements to false"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(pfalse))] +pub fn svpfalse_b() -> svbool_t { + svdupq_n_b8( + false, false, false, false, false, false, false, false, false, false, false, false, false, + false, false, false, + ) +} +#[doc = "Set the first active predicate element to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pfirst))] +pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")] + fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpfirst_b(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")] + fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpnext_b8(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")] + fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t; + } + unsafe { _svpnext_b16(pg.sve_into(), 
op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")] + fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t; + } + unsafe { _svpnext_b32(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")] + fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t; + } + unsafe { _svpnext_b64(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")] + fn _svprfb(pg: 
svbool_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfb(pg, base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")] + fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfh(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")] + fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfw(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")] + fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfd(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s32offset( + pg: svbool_t, + base: *const T, + offsets: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32" + )] + fn _svprfb_gather_s32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_s32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32" + )] + fn _svprfh_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32" + )] + fn _svprfw_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32" + )] + fn _svprfd_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s64offset( + pg: svbool_t, + base: *const T, + offsets: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64" + )] + fn _svprfb_gather_s64offset( + pg: svbool2_t, + base: *const crate::ffi::c_void, + offsets: svint64_t, + op: svprfop, + ); + } + _svprfb_gather_s64offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64" + )] + fn _svprfh_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfh_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64" + )] + fn _svprfw_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfw_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64" + )] + fn _svprfd_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfd_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u32offset( + pg: svbool_t, + base: *const T, + offsets: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32" + )] + fn _svprfb_gather_u32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_u32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets.as_signed(), + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32" + )] + fn _svprfh_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32" + )] + fn _svprfw_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + 
indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32" + )] + fn _svprfd_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u64offset( + pg: svbool_t, + base: *const T, + offsets: svuint64_t, +) { + svprfb_gather_s64offset::(pg, base, offsets.as_signed()) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfh_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfw_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfd_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfb_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn 
_svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn 
svprfd_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfb_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base_offset( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop); + } + _svprfb_gather_u32base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base_offset( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop); + } + _svprfb_gather_u64base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: 
svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfb::(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfh::(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfw::(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfd::(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Test whether any active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.any.nxv16i1" + )] + fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_any(pg, op) } +} +#[doc = "Test whether first active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.first.nxv16i1" + )] + fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_first(pg, op) } +} +#[doc = "Test whether last active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ptest))] +pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.last.nxv16i1" + )] + fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_last(pg, op) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b8() -> svbool_t { + svptrue_pat_b8::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b16() -> svbool_t { + svptrue_pat_b16::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b32() -> svbool_t { + svptrue_pat_b32::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b64() -> svbool_t { + svptrue_pat_b64::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { _svptrue_pat_b16(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { _svptrue_pat_b32(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { _svptrue_pat_b64(PATTERN).sve_into() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8" + )] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16" + )] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + 
unsafe { _svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32" + )] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64" + )] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + 
} + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8" + )] + fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16" + )] + fn _svqadd_u16(op1: 
svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32" + )] + fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64" + )] + fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqdecw_pat_n_s32::<{ svpattern::SV_ALL },
IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u64<const IMM_FACTOR: i32>(op: u64) ->
u64 { + svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")] + fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")] + fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32
{ + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")] + fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")] + fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")] + fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] +#[inline(always)]
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")] + fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")] + fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")] + fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe {
_svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")] + fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")] + fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn
svqdecw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")] + fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")] + fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")] + fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")] + fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")] + fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { +
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")] + fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")] + fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")] + fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] +#[inline(always)]
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")] + fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")] + fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")] + fn
_svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")] + fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t { + svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t { + svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t { + svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t { + svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t { + svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t { + svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] +#[inline(always)]
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_s32_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_s64_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b32(op: i64, pg: 
svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")] + fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")] + fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")] + fn _svqdecp_s64(op: 
svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")] + fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")] + fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")] + fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { 
_svqdecp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s32(op: i32) -> i32 { + svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s32(op: i32) -> i32 { + svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s32(op: i32) -> i32 { + svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s32(op: i32) -> i32 { + svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s64(op: i64) -> i64 { + svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s64(op: i64) -> i64 { + svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s64(op: i64) -> i64 { + svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s64(op: i64) -> i64 { + svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u32(op: u32) -> u32 { + svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u32(op: u32) -> u32 { + svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u32(op: u32) -> u32 { + svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u32(op: u32) -> u32 { + svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u64(op: u64) -> 
u64 { + svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u64(op: u64) -> u64 { + svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u64(op: u64) -> u64 { + svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u64(op: u64) -> u64 { + svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")] + fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")] + fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")] + fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# 
[cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")] + fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")] + fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")] + fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")] + fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")] + fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uqincb.n32")] + fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")] + fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")] + fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")] + fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")] + fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")] + fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { 
_svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")] + fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")] + fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , 
IMM_FACTOR = 1))] +pub fn svqinch_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")] + fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")] + fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")] + fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")] + fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")] + fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + 
+    static_assert_range!(IMM_FACTOR, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")]
+        fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
+    }
+    unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
+pub fn svqinch_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t {
+    svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
+pub fn svqincw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t {
+    svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
+pub fn svqincd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t {
+    svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve")]
+#[unstable(feature =
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_u16(op: svuint16_t) -> svuint16_t { + svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_u32(op: svuint32_t) -> svuint32_t { + svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_u64(op: svuint64_t) -> svuint64_t { + svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv16i1" + )] + fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_s32_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable 
= "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1" + )] + fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1" + )] + fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1" + )] + fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1" + )] + fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_s64_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1" + )] + fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1" + )] + fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 
{ + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1" + )] + fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1" + )] + fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1" + )] + fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1" + )] + fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1" + )] + fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1" + )] + fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1" + )] + fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1" + )] + fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1" + )] + fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")] + fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")] + fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")] + fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")] + fn _svqincp_u16(op: 
svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")] + fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")] + fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8" + )] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { 
_svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16" + )] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32" + )] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> 
svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64" + )] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8" + )] + fn _svqsub_u8(op1: 
svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16" + )] + fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32" + )] + fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64" + )] + fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn 
svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")] + fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svrbit_s8_m(inactive, pg, op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")] + fn _svrbit_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrbit_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")] + fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrbit_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(svdup_n_s32(0), 
pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")] + fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrbit_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + 
svrbit_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr() -> svbool_t { + svrdffr_z(svptrue_b8()) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr_z(pg: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")] + fn _svrdffr_z(pg: svbool_t) -> svbool_t; + } + unsafe { _svrdffr_z(pg) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" + )] + fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpe_f32(op) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" + )] + fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpe_f64(op) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" + )] + fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecps_f32(op1, op2) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" + )] + fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal 
exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector 
contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { + unsafe { 
crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u8(op: svuint8_t) -> 
svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub 
fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector 
contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc 
= "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } 
+} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { + unsafe { 
crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s64(op: svint64_t) -> 
svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn 
svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b8(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")] + fn _svrev_b8(op: svbool_t) -> svbool_t; + } + unsafe { _svrev_b8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b16(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")] + fn _svrev_b16(op: svbool8_t) -> svbool8_t; + } + unsafe { _svrev_b16(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b32(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + fn _svrev_b32(op: svbool4_t) -> svbool4_t; + } + unsafe { _svrev_b32(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b64(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + fn _svrev_b64(op: 
svbool2_t) -> svbool2_t; + } + unsafe { _svrev_b64(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrev_f32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrev_f64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s8(op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + fn _svrev_s8(op: svint8_t) -> svint8_t; + } + unsafe { _svrev_s8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s16(op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + fn _svrev_s16(op: svint16_t) -> svint16_t; + } + unsafe { _svrev_s16(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s32(op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + fn _svrev_s32(op: svint32_t) -> svint32_t; + } + unsafe { _svrev_s32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s64(op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + fn _svrev_s64(op: svint64_t) -> svint64_t; + } + unsafe { _svrev_s64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u8(op: svuint8_t) -> svuint8_t { + unsafe { svrev_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u16(op: svuint16_t) -> svuint16_t { + unsafe { svrev_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u32(op: svuint32_t) -> svuint32_t { + unsafe { svrev_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u64(op: svuint64_t) -> svuint64_t { + unsafe { svrev_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")] + fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrevb_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn 
svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")] + fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevb_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + 
svrevb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")] + fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevb_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrevb_s16_m(inactive.as_signed(), pg, 
op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")] + fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevh_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")] + fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevh_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")] + fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevw_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse words within elements"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(revw))] +pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")] + fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinta_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(frinta))] +pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")] + fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinta_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")] + fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinti_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding 
mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")] + fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinti_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(op, pg, op) +} +#[doc = "Round 
using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")] + fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintm_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")] + fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintm_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")] + fn 
_svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintn_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")] + fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintn_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(frintn))] +pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")] + fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintp_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) 
-> svfloat32_t { + svrintp_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")] + fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintp_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe 
extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")] + fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")] + fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")] + fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintz_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")] + fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintz_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frsqrts))]
// Reciprocal square-root step (unpredicated): thin wrapper over the LLVM SVE
// FRSQRTS intrinsic.
pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64"
        )]
        fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: register-to-register intrinsic call; no memory is accessed.
    unsafe { _svrsqrts_f64(op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_m` (merging) variant: calls the predicated LLVM `fscale` intrinsic directly.
pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")]
        fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    // `sve_into` converts the byte predicate to the 32-bit-element predicate
    // type (`svbool4_t`) declared by the intrinsic.
    unsafe { _svscale_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_n` (scalar) variant: broadcasts `op2` with `svdup` and defers to the vector form.
pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    svscale_f32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_x` ("don't care") variant: implemented here as the merging variant.
pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    svscale_f32_m(pg, op1, op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// Scalar `_x` variant: broadcast then defer to the vector `_x` form.
pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    svscale_f32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_z` (zeroing) variant: inactive lanes of `op1` are zeroed via `svsel`
// before the merging operation runs.
pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// Scalar `_z` variant: broadcast then defer to the vector `_z` form.
pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    svscale_f32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// f64 merging form; predicate converted to the 64-bit-element type `svbool2_t`.
pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")]
        fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    unsafe { _svscale_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// Scalar `_m` variant for f64.
pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    svscale_f64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_x` variant for f64: implemented as the merging variant.
pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    svscale_f64_m(pg, op1, op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// Scalar `_x` variant for f64.
pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    svscale_f64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
// `_z` variant for f64: zero inactive lanes of `op1`, then merge.
pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test,
assert_instr(fscale))]
// Scalar `_z` variant: broadcast `op2`, then defer to the vector `_z` form.
pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    svscale_f64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
// Lane-wise select on predicates: `op1` where `pg` is true, else `op2`.
pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // SAFETY: mask and operands are all the byte-granularity predicate type.
    unsafe { simd_select(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
// NOTE(review): the turbofish type arguments below were stripped in the
// reviewed text (`simd_select::(`); restored as <mask, value> pairs matching
// the predicate widths used by the sibling FFI declarations in this file.
pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // SAFETY: 32-bit-element predicate (`svbool4_t`) selecting f32 lanes.
    unsafe { simd_select::<svbool4_t, svfloat32_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // SAFETY: 64-bit-element predicate (`svbool2_t`) selecting f64 lanes.
    unsafe { simd_select::<svbool2_t, svfloat64_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // SAFETY: byte predicate matches 8-bit elements, so no conversion is needed.
    unsafe { simd_select::<svbool_t, svint8_t>(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // SAFETY: 16-bit-element predicate (`svbool8_t`) selecting i16 lanes.
    unsafe { simd_select::<svbool8_t, svint16_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // SAFETY: 32-bit-element predicate selecting i32 lanes.
    unsafe { simd_select::<svbool4_t, svint32_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // SAFETY: 64-bit-element predicate selecting i64 lanes.
    unsafe { simd_select::<svbool2_t, svint64_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: byte predicate matches 8-bit elements; no conversion needed.
    unsafe { simd_select::<svbool_t, svuint8_t>(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: 16-bit-element predicate selecting u16 lanes.
    unsafe { simd_select::<svbool8_t, svuint16_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: 32-bit-element predicate selecting u32 lanes.
    unsafe { simd_select::<svbool4_t, svuint32_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: 64-bit-element predicate selecting u64 lanes.
    unsafe { simd_select::<svbool2_t, svuint64_t>(pg.sve_into(), op1, op2) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// `IMM_INDEX` (restored const generic — the body already asserts on it)
// selects which of the two tuple slots receives `x`.
pub fn svset2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe {
crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x)
    }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// `IMM_INDEX` restored as a const generic parameter (the body already
// references and range-checks it); 0..=1 selects the tuple slot.
pub fn svset2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Three-vector tuples: the slot index ranges over 0..=2.
pub fn svset3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t {
static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// `IMM_INDEX` restored as a const generic parameter (the body already
// references and range-checks it); 0..=2 selects the tuple slot.
pub fn svset3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Four-vector tuples: the slot index ranges over 0..=3.
pub fn svset4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// `IMM_INDEX` restored as a const generic parameter (the body already
// references and range-checks it); 0..=3 selects the tuple slot.
pub fn svset4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Initialize the first-fault register to all-true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(setffr))]
// Writes the FFR; no operands, no result.
pub fn svsetffr() {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")]
        fn _svsetffr();
    }
    // SAFETY: only architectural state (the first-fault register) is written.
    unsafe { _svsetffr() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")]
        fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: register-only intrinsic; predicate converted to `svbool4_t`.
    unsafe { _svsplice_f32(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")]
        fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svsplice_f64(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")]
        fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // Byte elements: the predicate type already matches, no conversion needed.
    unsafe { _svsplice_s8(pg, op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")]
        fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svsplice_s16(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")]
        fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svsplice_s32(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")]
        fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svsplice_s64(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
// Unsigned forms are implemented by bit-reinterpreting through the signed form.
pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature =
"stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
// Unsigned splice: reinterpret through the signed implementation.
pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
// `_m` (merging) variant: `inactive` supplies the lanes where `pg` is false.
pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")]
        fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: register-only intrinsic; predicate converted to `svbool4_t`.
    unsafe { _svsqrt_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
// `_x` variant: reuses `op` itself as the inactive-lane source.
pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    svsqrt_f32_m(op, pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
// `_z` variant: inactive lanes become zero via the broadcast-zero inactive vector.
pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    svsqrt_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")]
        fn _svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svsqrt_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    svsqrt_f64_m(op, pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    svsqrt_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
// Predicated contiguous store; note the LLVM intrinsic takes (data, pg, ptr),
// not the ACLE argument order (pg, base, data).
pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")]
        fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
    }
    _svst1_f32(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")]
        fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
    }
    _svst1_f64(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable =
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = "## Safety"] +#[doc = " 
* [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), 
data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32" + )] + fn _svst1_scatter_s32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_f32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32" + )] + fn 
_svst1_scatter_s32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_s32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64" + )] + fn _svst1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: 
svint64_t, + ); + } + _svst1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64" + )] + fn _svst1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = 
"Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32" + )] + fn _svst1_scatter_u32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_f32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32" + )] + fn _svst1_scatter_u32index_s32( 
+ data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_s32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32" + )] + fn _svst1_scatter_s32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_f32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32" + )] + fn _svst1_scatter_s32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_s32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64" + )] + fn _svst1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64" + )] + fn _svst1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32" + )] + fn _svst1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32" + )] + fn _svst1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + 
svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svst1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svst1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating 
store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} 
+#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svst1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, 
base: *mut i32, vnum: i64, data: svint32_t) { + svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and 
store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")] + fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8); + } + _svst1b_s16( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")] + fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8); + } + _svst1b_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")] + fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16); + } + _svst1h_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")] + fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8); + } + _svst1b_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")] + fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16); + } + _svst1h_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")] + fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32); + } + _svst1w_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { + svst1b_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { + svst1b_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { + svst1h_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { + svst1b_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { + svst1h_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { + svst1w_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8" + )] + fn _svst1b_scatter_s32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_s32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed 
by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16" + )] + fn _svst1h_scatter_s32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_s32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svint32_t, + data: svuint32_t, +) { + svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8" + )] + fn _svst1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svst1b_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16" + )] + fn _svst1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svst1h_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32" + )] + fn _svst1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svst1w_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8" + )] + fn _svst1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_u32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16" + )] + fn _svst1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_u32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits 
and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed 
by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane 
before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svst1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svst1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1h_scatter_u32base_offset_s32( + 
crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_u32( + 
pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svst1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1b_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = 
" * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svst1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1h_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn 
_svst1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1w_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn 
svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: 
svuint64_t) { + svst1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) { + svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) { + svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) { + svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) { + svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) { + svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) { + svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) { + svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) { + svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) { + svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) { + svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) { + svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) { + svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16" + )] + fn _svst1h_scatter_s32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_s32index_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16" + )] + fn _svst1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svst1h_scatter_s64index_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32" + )] + fn _svst1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svst1w_scatter_s64index_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints 
must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16" + )] + fn _svst1h_scatter_u32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_u32index_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = 
"Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")] + fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + 
pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")] + fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")] + fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base) +} +#[doc = 
"Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")] + fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")] + fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store 
two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")] + fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { + svst2_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must 
be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { + svst2_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { + svst2_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { + svst2_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = 
"Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) { + svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) { + svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) { + svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) { + svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) { + svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into 
two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) { + svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) { + svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) { + svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) { + svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) { + svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")] + fn _svst3_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst3_f32( + svget3_f32::<0>(data), + svget3_f32::<1>(data), + svget3_f32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")] + fn _svst3_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst3_f64( + svget3_f64::<0>(data), + 
svget3_f64::<1>(data), + svget3_f64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")] + fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst3_s8( + svget3_s8::<0>(data), + svget3_s8::<1>(data), + svget3_s8::<2>(data), + pg, + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")] + fn _svst3_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + pg: 
svbool8_t, + ptr: *mut i16, + ); + } + _svst3_s16( + svget3_s16::<0>(data), + svget3_s16::<1>(data), + svget3_s16::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")] + fn _svst3_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst3_s32( + svget3_s32::<0>(data), + svget3_s32::<1>(data), + svget3_s32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.st3.nxv2i64")] + fn _svst3_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst3_s64( + svget3_s64::<0>(data), + svget3_s64::<1>(data), + svget3_s64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { + svst3_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { + svst3_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { + svst3_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) { + svst3_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) { + svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) { + svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) { + svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) { + svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) { + svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) { + svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) { + svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) { + svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) { + svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) { + svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")] + fn _svst4_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + data3: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst4_f32( + svget4_f32::<0>(data), + svget4_f32::<1>(data), + svget4_f32::<2>(data), + svget4_f32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")] + fn _svst4_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + data3: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst4_f64( + svget4_f64::<0>(data), + svget4_f64::<1>(data), + svget4_f64::<2>(data), + svget4_f64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")] + fn _svst4_s8( + data0: svint8_t, + data1: svint8_t, + data2: svint8_t, + data3: svint8_t, + pg: svbool_t, + ptr: *mut i8, + ); + } + _svst4_s8( + svget4_s8::<0>(data), + svget4_s8::<1>(data), + svget4_s8::<2>(data), + svget4_s8::<3>(data), + pg, + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")] + fn _svst4_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + data3: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst4_s16( + svget4_s16::<0>(data), + svget4_s16::<1>(data), + svget4_s16::<2>(data), + svget4_s16::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")] + fn _svst4_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + data3: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst4_s32( + svget4_s32::<0>(data), + svget4_s32::<1>(data), + svget4_s32::<2>(data), + svget4_s32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")] + fn _svst4_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + data3: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst4_s64( + 
svget4_s64::<0>(data), + svget4_s64::<1>(data), + svget4_s64::<2>(data), + svget4_s64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { + svst4_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { + svst4_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { + svst4_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { + svst4_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) { + svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) { + svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) { + svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) { + svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) { + svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) { + svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) { + svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) { + svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) { + svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) { + svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers 
may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")] + fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svstnt1_f32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")] + fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svstnt1_f64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")] + fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svstnt1_s8(data, pg, base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.stnt1.nxv8i16")] + fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svstnt1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")] + fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svstnt1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")] + fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svstnt1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svstnt1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svstnt1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svstnt1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svstnt1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svstnt1_u8(pg, base.offset(svcntb() as isize * vnum 
as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut 
u32, vnum: i64, data: svuint32_t) { + svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> 
svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn 
svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsubr_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsubr_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsubr_s8_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, 
svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn 
svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + 
svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsubr_s16_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))] +pub fn svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svuint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32" + )] + fn _svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) } +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t { + svusdot_s32(op1, op3, op2) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t { + svsudot_s32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")] + fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbl_f32(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"] 
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")]
        fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    // Reinterpret the unsigned indices to the signed lane type declared by
    // the LLVM intrinsic.
    unsafe { _svtbl_f64(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")]
        fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t;
    }
    unsafe { _svtbl_s8(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")]
        fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t;
    }
    unsafe { _svtbl_s16(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")]
        fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe { _svtbl_s32(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")]
        fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe { _svtbl_s64(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t {
    // Unsigned variants reinterpret through the signed implementation.
    unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t {
    unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t {
    unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Trigonometric multiply-add coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
pub fn svtmad_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // FIX: `IMM3` was used below but never declared. FTMAD takes its
    // immediate as a const generic (matching the `assert_instr(ftmad,
    // IMM3 = 0)` attribute above), range-checked to the 0..=7 encoding.
    static_assert_range!(IMM3, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32"
        )]
        fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
    }
    unsafe { _svtmad_f32(op1, op2, IMM3) }
}
#[doc = "Trigonometric multiply-add coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
pub fn svtmad_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // FIX: declare the `IMM3` const generic (same defect as svtmad_f32).
    static_assert_range!(IMM3, 0..=7);
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64" + )] + fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t; + } + unsafe { _svtmad_f64(op1, op2, IMM3) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")] + fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn1_b8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")] + fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")] + fn 
_svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")] + fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")] + fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1_f32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")] + fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { 
_svtrn1_f64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")] + fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1_s8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")] + fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1_s16(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")] + fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1_s32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")] + fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1_s64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")] + fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1q_f32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")] + fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1q_f64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")] + fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1q_s8(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")] + fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1q_s16(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")] + fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1q_s32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { 
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")] + fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1q_s64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u64(op1: svuint64_t, 
op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")] + fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn2_b8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")] + fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")] + fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = 
"Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")] + fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")] + fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2_f32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")] + fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2_f64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")] + fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2_s8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")] + fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2_s16(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")] + fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2_s32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")] + fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2_s64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")] + fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2q_f32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")] + fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2q_f64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.trn2q.nxv16i8")] + fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2q_s8(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")] + fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2q_s16(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")] + fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2q_s32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")] + fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2q_s64(op1, 
op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Unsigned variants reinterpret through the signed implementation.
    unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Trigonometric starting value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftsmul))]
pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32"
        )]
        fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    // Reinterpret the unsigned operand to the signed lane type declared by
    // the LLVM intrinsic.
    unsafe { _svtsmul_f32(op1, op2.as_signed()) }
}
#[doc = "Trigonometric starting value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftsmul))]
pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64"
        )]
        fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    unsafe { _svtsmul_f64(op1, op2.as_signed()) }
}
#[doc = "Trigonometric select coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftssel))]
pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32"
        )]
        fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    unsafe { _svtssel_f32(op1, op2.as_signed()) }
}
#[doc = "Trigonometric select coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftssel))]
pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64"
        )]
        fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    unsafe { _svtssel_f64(op1, op2.as_signed()) }
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"]
#[doc = "## Safety"]
#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_f32() -> svfloat32x2_t {
    // NOTE(review): despite the name and the safety note above, this
    // zero-initialises the tuple rather than leaving it uninitialized —
    // presumably a deliberately sound implementation of the ACLE "undef"
    // API shape; confirm against the generator spec.
    svcreate2_f32(svdup_n_f32(0f32), svdup_n_f32(0f32))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"]
#[doc = "## Safety"]
#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_f64() -> svfloat64x2_t {
    // Zero-initialised; same review note as svundef2_f32 above.
    svcreate2_f64(svdup_n_f64(0f64), svdup_n_f64(0f64))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"]
#[doc = "## Safety"]
#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s8() -> svint8x2_t { + svcreate2_s8(svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s16() -> svint16x2_t { + svcreate2_s16(svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s32() -> svint32x2_t { + svcreate2_s32(svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s64() -> svint64x2_t { + svcreate2_s64(svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like 
[`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u8() -> svuint8x2_t { + svcreate2_u8(svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u16() -> svuint16x2_t { + svcreate2_u16(svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u32() -> svuint32x2_t { + svcreate2_u32(svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u64() -> svuint64x2_t { + svcreate2_u64(svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] +#[doc = "## 
Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f32() -> svfloat32x3_t { + svcreate3_f32(svdup_n_f32(0f32), svdup_n_f32(0f32), svdup_n_f32(0f32)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f64() -> svfloat64x3_t { + svcreate3_f64(svdup_n_f64(0f64), svdup_n_f64(0f64), svdup_n_f64(0f64)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s8() -> svint8x3_t { + svcreate3_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s16() -> svint16x3_t { + svcreate3_s16(svdup_n_s16(0), svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an 
uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s32() -> svint32x3_t { + svcreate3_s32(svdup_n_s32(0), svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s64() -> svint64x3_t { + svcreate3_s64(svdup_n_s64(0), svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u8() -> svuint8x3_t { + svcreate3_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +pub unsafe fn svundef3_u16() -> svuint16x3_t { + svcreate3_u16(svdup_n_u16(0), svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u32() -> svuint32x3_t { + svcreate3_u32(svdup_n_u32(0), svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u64() -> svuint64x3_t { + svcreate3_u64(svdup_n_u64(0), svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f32() -> svfloat32x4_t { + svcreate4_f32( + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] +#[doc = "## Safety"] +#[doc = " * This 
creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f64() -> svfloat64x4_t { + svcreate4_f64( + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s8() -> svint8x4_t { + svcreate4_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s16() -> svint16x4_t { + svcreate4_s16( + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s32() -> svint32x4_t { + svcreate4_s32( + svdup_n_s32(0), + svdup_n_s32(0), + 
svdup_n_s32(0), + svdup_n_s32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s64() -> svint64x4_t { + svcreate4_s64( + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u8() -> svuint8x4_t { + svcreate4_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u16() -> svuint16x4_t { + svcreate4_u16( + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like 
[`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u32() -> svuint32x4_t { + svcreate4_u32( + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u64() -> svuint64x4_t { + svcreate4_u64( + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + ) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f32() -> svfloat32_t { + svdup_n_f32(0f32) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f64() -> svfloat64_t { + svdup_n_f64(0f64) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] +#[doc = "## Safety"] +#[doc = 
" * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s8() -> svint8_t { + svdup_n_s8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s16() -> svint16_t { + svdup_n_s16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s32() -> svint32_t { + svdup_n_s32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s64() -> svint64_t { + svdup_n_s64(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u8() -> svuint8_t { + svdup_n_u8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u16() -> svuint16_t { + svdup_n_u16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u32() -> svuint32_t { + svdup_n_u32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u64() -> svuint64_t { + svdup_n_u64(0) +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))] +pub fn svusdot_lane_s32( + op1: svint32_t, + op2: svuint8_t, + op3: svint8_t, +) 
-> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32" + )] + fn _svusdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")] + fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusdot_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t { + svusdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Matrix multiply-accumulate (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usmmla))] +pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.usmmla.nxv4i32")] + fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")] + fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp1_b8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")] + fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")] + fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { 
_svuzp1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")] + fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")] + fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1_f32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")] + fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1_f64(op1, op2) } +} +#[doc = "Concatenate even elements from two 
inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")] + fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1_s8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")] + fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1_s16(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")] + fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1_s32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")] + fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1_s64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")] + fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1q_f32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")] + fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1q_f64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")] + fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1q_s8(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")] + fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1q_s16(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")] + fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1q_s32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")] + fn _svuzp1q_s64(op1: svint64_t, op2: 
svint64_t) -> svint64_t; + } + unsafe { _svuzp1q_s64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = 
"Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")] + fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp2_b8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")] + fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")] + fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")] + fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")] + fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2_f32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")] + fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2_f64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] 
+#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")] + fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2_s8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")] + fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2_s16(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")] + fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2_s32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn 
svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")] + fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2_s64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uzp2))] +pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")] + fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2q_f32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")] + fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2q_f64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")] + fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> 
svint8_t; + } + unsafe { _svuzp2q_s8(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")] + fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2q_s16(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")] + fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2q_s32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")] + fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2q_s64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32" + )] + fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32" + )] + fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32" + )] + fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32" + )] + fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64" + )] + fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64" + )] + fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64" + )] + fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64" + )] + fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32" + )] + fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32" + )] + fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32" + )] + fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32" + )] + fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than 
or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64" + )] + fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64" + )] + fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64" + )] + fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing 
scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64" + )] + fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32" + )] + fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32" + )] + fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32" + )] + fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32" + )] + fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64" + )] + fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64" + )] + fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64" + )] + fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64" + )] + fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32" + )] + fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32" + )] + fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32" + )] + fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32" + )] + fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64" + )] + fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64" + )] + fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64" + )] + fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64" + )] + fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Write to the first-fault register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(wrffr))] +pub fn svwrffr(op: svbool_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")] + fn _svwrffr(op: svbool_t); + } + unsafe { _svwrffr(op) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")] + fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip1_b8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")] + fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")] + fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(zip1))] +pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")] + fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")] + fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1_f32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")] + fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1_f64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")] + fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1_s8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")] + fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1_s16(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")] + fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1_s32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")] + fn _svzip1_s64(op1: 
svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1_s64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc 
= "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")] + fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")] + fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")] + fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")] + fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")] + fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")] + fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] 
+#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] +#[inline(always)] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")] + fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip2_b8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")] + fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")] + fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(zip2))] +pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")] + fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")] + fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2_f32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")] + fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2_f64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t 
{ + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")] + fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2_s8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")] + fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2_s16(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")] + fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2_s32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")] + fn _svzip2_s64(op1: 
svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2_s64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} 
+#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")] + fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")] + fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")] + fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")] + fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")] + fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")] + fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs 
new file mode 100644 index 0000000000..973b7e9fa3 --- /dev/null +++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs @@ -0,0 +1,9345 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 
data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, 
vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f32_with_svst1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f64_with_svst1_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s8_with_svst1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s16_with_svst1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s32_with_svst1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = 
svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s64_with_svst1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u8_with_svst1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u16_with_svst1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u32_with_svst1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = 
svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u64_with_svst1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let 
indices = svindex_s32(0, 1); + svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() { + 
let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, 
indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() 
{ + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] 
+unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = 
svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + 
svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); 
+} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as 
i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + 
svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as 
i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f32_with_svst1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svst1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svst1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); 
+ let loaded = svld1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = 
svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 
1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_f32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svtrn1q_f32( + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32), + svdupq_n_f32(4usize as f32, 5usize as f32, 6usize as f32, 7usize as f32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f64() { + if svcntb() < 32 { + 
println!("Skipping test_svld1ro_f64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svtrn1q_f64( + svdupq_n_f64(0usize as f64, 1usize as f64), + svdupq_n_f64(2usize as f64, 3usize as f64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svtrn1q_s8( + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 15usize as i8, + ), + svdupq_n_s8( + 16usize as i8, + 17usize as i8, + 18usize as i8, + 19usize as i8, + 20usize as i8, + 21usize as i8, + 22usize as i8, + 23usize as i8, + 24usize as i8, + 25usize as i8, + 26usize as i8, + 27usize as i8, + 28usize as i8, + 29usize as i8, + 30usize as i8, + 31usize as i8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svtrn1q_s16( + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + svdupq_n_s16( + 8usize as i16, + 9usize as i16, + 10usize as i16, + 11usize as i16, + 12usize as i16, + 13usize as i16, + 14usize as i16, + 15usize as i16, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s32 due to SVE vector length"); + return; + } + 
svsetffr(); + let loaded = svld1ro_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svtrn1q_s32( + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + svdupq_n_s32(4usize as i32, 5usize as i32, 6usize as i32, 7usize as i32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svtrn1q_s64( + svdupq_n_s64(0usize as i64, 1usize as i64), + svdupq_n_s64(2usize as i64, 3usize as i64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svtrn1q_u8( + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + svdupq_n_u8( + 16usize as u8, + 17usize as u8, + 18usize as u8, + 19usize as u8, + 20usize as u8, + 21usize as u8, + 22usize as u8, + 23usize as u8, + 24usize as u8, + 25usize as u8, + 26usize as u8, + 27usize as u8, + 28usize as u8, + 29usize as u8, + 30usize as u8, + 31usize as u8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svtrn1q_u16( + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 
6usize as u16, + 7usize as u16, + ), + svdupq_n_u16( + 8usize as u16, + 9usize as u16, + 10usize as u16, + 11usize as u16, + 12usize as u16, + 13usize as u16, + 14usize as u16, + 15usize as u16, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svtrn1q_u32( + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + svdupq_n_u32(4usize as u32, 5usize as u32, 6usize as u32, 7usize as u32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svtrn1q_u64( + svdupq_n_u64(0usize as u64, 1usize as u64), + svdupq_n_u64(2usize as u64, 3usize as u64), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f32() { + svsetffr(); + let loaded = svld1rq_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f64() { + svsetffr(); + let loaded = svld1rq_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64(loaded, svdupq_n_f64(0usize as f64, 1usize as f64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s8() { + svsetffr(); + let loaded = svld1rq_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 
15usize as i8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s16() { + svsetffr(); + let loaded = svld1rq_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s32() { + svsetffr(); + let loaded = svld1rq_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s64() { + svsetffr(); + let loaded = svld1rq_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64(loaded, svdupq_n_s64(0usize as i64, 1usize as i64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u8() { + svsetffr(); + let loaded = svld1rq_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u16() { + svsetffr(); + let loaded = svld1rq_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 6usize as u16, + 7usize as u16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u32() { + svsetffr(); + let loaded = svld1rq_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u64() { + svsetffr(); + let 
loaded = svld1rq_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64(loaded, svdupq_n_u64(0usize as u64, 1usize as u64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 
2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut 
storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + 
svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), 
offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + 
svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases 
= svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 
4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = 
svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let 
loaded = svld1sh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = 
svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i64( + loaded, + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_s64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1sw_u64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_u64( + loaded, + 
svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 
1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + 
); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] 
+unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1sw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + 
assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + 
svld1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 
as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 
1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + 
assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 
|| val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + 
svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; 
+ let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() 
as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); 
+ for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + 
svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + 
assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_s64(svptrue_b32(), bases); + 
assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = 
svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s32(svptrue_b16(), storage.as_ptr() as *const u16); + 
assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_s64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u32(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u64( + 
loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_u64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + 
let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + 
let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 
0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 
1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as 
u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = 
svld1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 
320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 
1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f32_with_svst2_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f64_with_svst2_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_f64(svptrue_b64(), storage.as_ptr() as *const f64); + 
assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s8_with_svst2_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld2_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s16_with_svst2_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + 
svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s32_with_svst2_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s64_with_svst2_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u8_with_svst2_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate2_u8( + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + 
svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u16_with_svst2_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u32_with_svst2_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld2_u32(svptrue_b32(), storage.as_ptr() as *const u32); + 
assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u64_with_svst2_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f32_with_svst2_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 
0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f64_with_svst2_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld2_vnum_s8(svptrue_b8(), storage.as_ptr() 
as *const i8, 1); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), 
+ svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate2_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + 
assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld2_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + 
(len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f32_with_svst3_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + 
svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f64_with_svst3_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s8_with_svst3_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 
3usize.try_into().unwrap()), + ); + svst3_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s16_with_svst3_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s32_with_svst3_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate3_s32( + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + 
svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s64_with_svst3_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u8_with_svst3_u8() { + let mut storage 
= [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u16_with_svst3_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 
3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u32_with_svst3_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u64_with_svst3_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 
3usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f32_with_svst3_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f64_with_svst3_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + 
svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + 
svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate3_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s32(svptrue_b32(), 
storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld3_vnum_u8_with_svst3_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 
3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 
3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f32_with_svst4_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 
4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f64_with_svst4_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 
4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s8_with_svst4_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s16_with_svst4_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld4_s16(svptrue_b16(), storage.as_ptr() as *const i16); + 
assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s32_with_svst4_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s64_with_svst4_s64() { + let mut storage = [0 as i64; 
160usize]; + let data = svcreate4_s64( + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u8_with_svst4_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + 
assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u16_with_svst4_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u32_with_svst4_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 
4usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u64_with_svst4_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 
4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f32_with_svst4_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + 
svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f64_with_svst4_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svld4_vnum_s8_with_svst4_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, 
data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld4_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + 
assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate4_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8( + (len 
+ 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_vnum_u16(svptrue_b16(), 
storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + 
assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldff1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldff1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldff1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u16() { + svsetffr(); + let _ = 
svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldff1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 
0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] 
+unsafe fn test_svldff1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u64() { + svsetffr(); + let _ = 
svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + 
svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 
1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = 
svldff1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + 
svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_s32(svptrue_b16(), 
U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + 
assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldnf1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldnf1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldnf1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svldnf1_u16() { + svsetffr(); + let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldnf1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + 
assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 
1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 
1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + 
svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), 
U16_DATA.as_ptr()); + let loaded = svldnf1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + 
assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = 
svldnf1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as 
usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f32_with_svstnt1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, 
&val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f64_with_svstnt1_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s8_with_svstnt1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s16_with_svstnt1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_s16(svptrue_b16(), storage.as_ptr() as *const 
i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s32_with_svstnt1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s64_with_svstnt1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u8_with_svstnt1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u16_with_svstnt1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = 
svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u32_with_svstnt1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u64_with_svstnt1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f32_with_svstnt1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + 
assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f64_with_svstnt1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + 
svstnt1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + 
svstnt1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u64(svptrue_b64(), 
storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb() { + svsetffr(); + let loaded = svprfb::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh() { + svsetffr(); + let loaded = svprfh::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw() { + svsetffr(); + let loaded = svprfw::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd() { + svsetffr(); + let loaded = svprfd::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s32offset() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfh_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfw_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfd_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + 
svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s64offset() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfh_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfw_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfd_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32offset() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfh_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfw_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let 
loaded = svprfd_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64offset() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfh_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfw_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfd_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfb_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = 
svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32base_offset() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 + 4u32 as i64, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfh_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfw_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfd_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base_offset() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, 
offsets); + svsetffr(); + let loaded = svprfb_gather_u64base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 8u32.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_vnum() { + svsetffr(); + let loaded = svprfb_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_vnum() { + svsetffr(); + let loaded = svprfh_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_vnum() { + svsetffr(); + let loaded = svprfw_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_vnum() { + svsetffr(); + let loaded = svprfd_vnum::<{ 
svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_ffr() { + svsetffr(); + let ffr = svrdffr(); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0)); + let pred = svdupq_n_b8( + true, false, true, false, true, false, true, false, true, false, true, false, true, false, + true, false, + ); + svwrffr(pred); + let ffr = svrdffr_z(svptrue_b8()); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1)); +} diff --git a/crates/core_arch/src/aarch64/sve/mod.rs b/crates/core_arch/src/aarch64/sve/mod.rs new file mode 100644 index 0000000000..a3f70ab61c --- /dev/null +++ b/crates/core_arch/src/aarch64/sve/mod.rs @@ -0,0 +1,379 @@ +//! SVE intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use crate::intrinsics::{simd::*, *}; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +use crate::{marker::ConstParamTy, mem::transmute}; + +pub(super) trait AsUnsigned { + type Unsigned; + unsafe fn as_unsigned(self) -> Self::Unsigned; +} + +pub(super) trait AsSigned { + type Signed; + unsafe fn as_signed(self) -> Self::Signed; +} + +/// Same as `Into` but with into being unsafe so that it can have the required `target_feature` +pub(super) trait SveInto: Sized { + unsafe fn sve_into(self) -> T; +} + +macro_rules! impl_sve_type { + ($(($v:vis, $elem_type:ty, $name:ident, $elt:literal))*) => ($( + #[doc = concat!("Scalable vector of type ", stringify!($elem_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector($elt)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($elem_type); + )*) +} + +macro_rules! 
impl_sve_tuple_type { + ($(($v:vis, $vec_type:ty, $elt:tt, $name:ident))*) => ($( + impl_sve_tuple_type!(@ ($v, $vec_type, $elt, $name)); + )*); + (@ ($v:vis, $vec_type:ty, 2, $name:ident)) => ( + #[doc = concat!("Two-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 3, $name:ident)) => ( + #[doc = concat!("Three-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 4, $name:ident)) => ( + #[doc = concat!("Four-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type, $vec_type); + ); +} + +macro_rules! impl_sign_conversions_sv { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute_unchecked(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute_unchecked(self) + } + } + )*) +} + +macro_rules! 
impl_sign_conversions { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute(self) + } + } + )*) +} + +/// LLVM requires the predicate lane count to be the same as the lane count +/// it's working with. However the ACLE only defines one bool type and the +/// instruction set doesn't have this distinction. As a result we have to +/// create these internal types so we can match the LLVM signature. Each of +/// these internal types can be converted to the public `svbool_t` type and +/// the `svbool_t` type can be converted into these. +macro_rules! impl_internal_sve_predicate { + ($(($name:ident, $elt:literal))*) => ($( + impl_sve_type! { + (pub(super), bool, $name, $elt) + } + + impl SveInto for $name { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> svbool_t { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.to.svbool.nxv", $elt, "i1") + )] + fn convert_to_svbool(b: $name) -> svbool_t; + } + unsafe { convert_to_svbool(self) } + } + } + + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + impl SveInto<$name> for svbool_t { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> $name { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv", $elt, "i1") + )] + fn convert_from_svbool(b: svbool_t) -> $name; + } + unsafe { convert_from_svbool(self) } + } + } + )*) +} + +impl_sve_type! 
{ + (pub, bool, svbool_t, 16) + + (pub, i8, svint8_t, 16) + (pub, u8, svuint8_t, 16) + + (pub, i16, svint16_t, 8) + (pub, u16, svuint16_t, 8) + (pub, f32, svfloat32_t, 4) + (pub, i32, svint32_t, 4) + (pub, u32, svuint32_t, 4) + (pub, f64, svfloat64_t, 2) + (pub, i64, svint64_t, 2) + (pub, u64, svuint64_t, 2) + + // Internal types: + (pub(super), i8, nxv2i8, 2) + (pub(super), i8, nxv4i8, 4) + (pub(super), i8, nxv8i8, 8) + + (pub(super), i16, nxv2i16, 2) + (pub(super), i16, nxv4i16, 4) + + (pub(super), i32, nxv2i32, 2) + + (pub(super), u8, nxv2u8, 2) + (pub(super), u8, nxv4u8, 4) + (pub(super), u8, nxv8u8, 8) + + (pub(super), u16, nxv2u16, 2) + (pub(super), u16, nxv4u16, 4) + + (pub(super), u32, nxv2u32, 2) +} + +impl_sve_tuple_type! { + (pub, svint8_t, 2, svint8x2_t) + (pub, svuint8_t, 2, svuint8x2_t) + (pub, svint16_t, 2, svint16x2_t) + (pub, svuint16_t, 2, svuint16x2_t) + (pub, svfloat32_t, 2, svfloat32x2_t) + (pub, svint32_t, 2, svint32x2_t) + (pub, svuint32_t, 2, svuint32x2_t) + (pub, svfloat64_t, 2, svfloat64x2_t) + (pub, svint64_t, 2, svint64x2_t) + (pub, svuint64_t, 2, svuint64x2_t) + + (pub, svint8_t, 3, svint8x3_t) + (pub, svuint8_t, 3, svuint8x3_t) + (pub, svint16_t, 3, svint16x3_t) + (pub, svuint16_t, 3, svuint16x3_t) + (pub, svfloat32_t, 3, svfloat32x3_t) + (pub, svint32_t, 3, svint32x3_t) + (pub, svuint32_t, 3, svuint32x3_t) + (pub, svfloat64_t, 3, svfloat64x3_t) + (pub, svint64_t, 3, svint64x3_t) + (pub, svuint64_t, 3, svuint64x3_t) + + (pub, svint8_t, 4, svint8x4_t) + (pub, svuint8_t, 4, svuint8x4_t) + (pub, svint16_t, 4, svint16x4_t) + (pub, svuint16_t, 4, svuint16x4_t) + (pub, svfloat32_t, 4, svfloat32x4_t) + (pub, svint32_t, 4, svint32x4_t) + (pub, svuint32_t, 4, svuint32x4_t) + (pub, svfloat64_t, 4, svfloat64x4_t) + (pub, svint64_t, 4, svint64x4_t) + (pub, svuint64_t, 4, svuint64x4_t) +} + +impl_sign_conversions! 
{ + (i8, u8) + (i16, u16) + (i32, u32) + (i64, u64) + (*const i8, *const u8) + (*const i16, *const u16) + (*const i32, *const u32) + (*const i64, *const u64) + (*mut i8, *mut u8) + (*mut i16, *mut u16) + (*mut i32, *mut u32) + (*mut i64, *mut u64) +} + +impl_sign_conversions_sv! { + (svint8_t, svuint8_t) + (svint16_t, svuint16_t) + (svint32_t, svuint32_t) + (svint64_t, svuint64_t) + + (svint8x2_t, svuint8x2_t) + (svint16x2_t, svuint16x2_t) + (svint32x2_t, svuint32x2_t) + (svint64x2_t, svuint64x2_t) + + (svint8x3_t, svuint8x3_t) + (svint16x3_t, svuint16x3_t) + (svint32x3_t, svuint32x3_t) + (svint64x3_t, svuint64x3_t) + + (svint8x4_t, svuint8x4_t) + (svint16x4_t, svuint16x4_t) + (svint32x4_t, svuint32x4_t) + (svint64x4_t, svuint64x4_t) + + // Internal types: + (nxv2i8, nxv2u8) + (nxv4i8, nxv4u8) + (nxv8i8, nxv8u8) + + (nxv2i16, nxv2u16) + (nxv4i16, nxv4u16) + + (nxv2i32, nxv2u32) +} + +impl_internal_sve_predicate! { + (svbool2_t, 2) + (svbool4_t, 4) + (svbool8_t, 8) +} + +/// Patterns accepted by a `PTRUE` +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svpattern { + /// Activate the largest power-of-two number of elements that does not exceed the vector length + SV_POW2 = 0, + /// Activate the first element + SV_VL1 = 1, + /// Activate the first two elements + SV_VL2 = 2, + /// Activate the first three elements + SV_VL3 = 3, + /// Activate the first four elements + SV_VL4 = 4, + /// Activate the first five elements + SV_VL5 = 5, + /// Activate the first six elements + SV_VL6 = 6, + /// Activate the first seven elements + SV_VL7 = 7, + /// Activate the first eight elements + SV_VL8 = 8, + /// Activate the first sixteen elements + SV_VL16 = 9, + /// Activate the first thirty-two elements + SV_VL32 = 10, + /// Activate the first sixty-four elements + SV_VL64 = 11, + /// Activate the first one-hundred-and-twenty-eight
elements + SV_VL128 = 12, + /// Activate the first two-hundred-and-fifty-six elements + SV_VL256 = 13, + /// Activate the largest multiple-of-four number of elements that does not exceed the vector length + SV_MUL4 = 29, + /// Activate the largest multiple-of-three number of elements that does not exceed the vector + /// length + SV_MUL3 = 30, + /// Activate all elements + SV_ALL = 31, +} + +/// Addressing mode for prefetch intrinsics - allows the specification of the expected access +/// kind (read or write), the cache level to load the data, the data retention policy +/// (temporal or streaming) +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svprfop { + /// Temporal fetch of the addressed location for reading to the L1 cache (i.e. allocate in + /// cache normally) + SV_PLDL1KEEP = 0, + /// Streaming fetch of the addressed location for reading to the L1 cache (i.e. memory only + /// used once) + SV_PLDL1STRM = 1, + /// Temporal fetch of the addressed location for reading to the L2 cache (i.e. allocate in + /// cache normally) + SV_PLDL2KEEP = 2, + /// Streaming fetch of the addressed location for reading to the L2 cache (i.e. memory only + /// used once) + SV_PLDL2STRM = 3, + /// Temporal fetch of the addressed location for reading to the L3 cache (i.e. allocate in + /// cache normally) + SV_PLDL3KEEP = 4, + /// Streaming fetch of the addressed location for reading to the L3 cache (i.e. memory only + /// used once) + SV_PLDL3STRM = 5, + /// Temporal fetch of the addressed location for writing to the L1 cache (i.e. allocate in + /// cache normally) + SV_PSTL1KEEP = 8, + /// Streaming fetch of the addressed location for writing to the L1 cache (i.e. memory only + /// used once) + SV_PSTL1STRM = 9, + /// Temporal fetch of the addressed location for writing to the L2 cache (i.e.
allocate in + /// cache normally) + SV_PSTL2KEEP = 10, + /// Streaming fetch of the addressed location for writing to the L2 cache (i.e. memory only + /// used once) + SV_PSTL2STRM = 11, + /// Temporal fetch of the addressed location for writing to the L3 cache (i.e. allocate in + /// cache normally) + SV_PSTL3KEEP = 12, + /// Streaming fetch of the addressed location for writing to the L3 cache (i.e. memory only + /// used once) + SV_PSTL3STRM = 13, +} + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/crates/core_arch/src/aarch64/sve2/generated.rs b/crates/core_arch/src/aarch64/sve2/generated.rs new file mode 100644 index 0000000000..cb1114d270 --- /dev/null +++ b/crates/core_arch/src/aarch64/sve2/generated.rs @@ -0,0 +1,23865 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")] + fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_s8(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature =
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svaba_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")] + fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_s16(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svaba_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")] + fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_s32(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svaba_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")] + fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_s64(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svaba_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")] + fn 
_svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svaba_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")] + fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svaba_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")] + fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svaba_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")] + fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> 
svuint64_t { + svaba_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")] + fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")] + fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")] + fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")] + fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { 
_svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")] + fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")] + fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")] + fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute 
difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")] + fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")] + fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")] + fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")] + fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { 
_svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")] + fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sabdlb))] +pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")] + fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_s16(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")] + fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_s32(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")] + fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_s64(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")] + fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")] + fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")] + fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(uabdlb))] +pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")] + fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_s16(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")] + fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_s32(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")] + fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_s64(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")] + fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")] + fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")] + fn _svabdlt_u64(op1: svint32_t, 
op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")] + fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn 
svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")] + fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")] + fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")] + fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_u16_m(pg.sve_into(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")] + fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] 
+pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")] + fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")] + fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")] + fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + 
unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")] + fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(adclt))] +pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")] + fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")] + fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnb_s16(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svaddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")] + fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnb_s32(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svaddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")] + fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnb_s64(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + 
svaddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")] + fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnt_s16(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svaddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")] + fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnt_s32(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svaddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")] + fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnt_s64(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svaddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u32(even: svuint16_t, op1: 
svuint32_t, op2: u32) -> svuint16_t { + svaddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")] + fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_s16(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s16(op1: svint8_t, op2: 
i8) -> svint16_t { + svaddlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")] + fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_s32(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")] + fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_s64(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn 
svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")] + fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")] + fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")] + fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv8i16" + )] + fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlbt_s16(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv4i32" + )] + fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlbt_s32(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv2i64" + )] + fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } 
+ unsafe { _svaddlbt_s64(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")] + fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_s16(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")] + fn _svaddlt_s32(op1: svint16_t, 
op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_s32(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")] + fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_s64(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")] + fn 
_svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")] + fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")] + fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")] + fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svaddp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svaddp_f32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")] + fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svaddp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svaddp_f64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")] + fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaddp_s8_m(pg, op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svaddp_s8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")] + fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svaddp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svaddp_s16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")] + fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svaddp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_x(pg: svbool_t, 
op1: svint32_t, op2: svint32_t) -> svint32_t { + svaddp_s32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")] + fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svaddp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svaddp_s64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + 
svaddp_u8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svaddp_u16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svaddp_u32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svaddp_u64_m(pg, op1, op2) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")] + fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_s16(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")] + fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_s32(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")] + fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_s64(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")] + fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")] + fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")] + fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")] + fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_s16(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + 
svaddwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")] + fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_s32(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")] + fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_s64(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s64(op1: svint64_t, 
op2: i32) -> svint64_t { + svaddwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")] + fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")] + fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")] + fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "AES single round decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesd))] +pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")] + fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES single round encryption"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aese))] +pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")] + fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES inverse mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesimc))] +pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")] + fn _svaesimc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "AES mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesmc))] +pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")] + fn _svaesmc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")] + fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbcax_s8(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbcax_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")] + fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbcax_s16(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbcax_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")] + fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbcax_s32(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbcax_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")] + fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbcax_s64(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] 
+pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbcax_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbcax_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbcax_u16(op1, op2, svdup_n_u16(op3)) 
+} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbcax_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbcax_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")] + fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbdep_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")] + fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbdep_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")] + fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbdep_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")] + fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbdep_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")] + fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbext_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(bext))]
pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")]
        fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svbext_u16(op1, svdup_n_u16(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")]
        fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    svbext_u32(op1, svdup_n_u32(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")]
        fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    svbext_u64(op1, svdup_n_u64(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")]
        fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    svbgrp_u8(op1, svdup_n_u8(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")]
        fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svbgrp_u16(op1, svdup_n_u16(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")]
        fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    svbgrp_u32(op1, svdup_n_u32(op2))
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")]
        fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    svbgrp_u64(op1, svdup_n_u64(op2))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")]
        fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svbsl1n_s8(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svbsl1n_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")]
        fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _svbsl1n_s16(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svbsl1n_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")]
        fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svbsl1n_s32(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svbsl1n_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")]
        fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svbsl1n_s64(op1, op2, op3) }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svbsl1n_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svbsl1n_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svbsl1n_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svbsl1n_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t,
op3: u64) -> svuint64_t {
    svbsl1n_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")]
        fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svbsl2n_s8(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svbsl2n_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")]
        fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _svbsl2n_s16(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svbsl2n_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")]
        fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svbsl2n_s32(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svbsl2n_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")]
        fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svbsl2n_s64(op1, op2, op3) }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svbsl2n_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svbsl2n_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svbsl2n_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svbsl2n_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe {
svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl2n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")] + fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")] + fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")] + fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn 
svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")] + fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) 
-> svuint16_t { + unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svbsl_u64(op1, op2, svdup_n_u64(op3))
}
// NOTE(review): the `<const IMM_ROTATION: i32>` / `<const IMM_INDEX: i32>` parameter
// lists of the svcadd/svcdot intrinsics below (and the matching turbofish arguments on
// their forwarding calls) had been stripped by an angle-bracket-eating text mangle,
// leaving `IMM_ROTATION`/`IMM_INDEX` unresolved. They are restored here; the rotation
// and index immediates are compile-time constants per the ACLE intrinsic contracts.
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")]
        fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")]
        fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
    }
    unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")]
        fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
    }
    unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")]
        fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
    }
    unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u8<const IMM_ROTATION: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u16<const IMM_ROTATION: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u32<const IMM_ROTATION: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u64<const IMM_ROTATION: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe { svcadd_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32"
        )]
        fn _svcdot_lane_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s64<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64"
        )]
        fn _svcdot_lane_s64(
            op1: svint64_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
pub fn svcdot_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")]
        fn _svcdot_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_rotation:
i32, + ) -> svint32_t; + } + unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")] + fn _svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16" + )] + fn _svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32" + )] + fn _svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s16::( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn 
svcmla_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s32::( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")] + fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")] + fn _svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) } +} 
+#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")] + fn _svcmla_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")] + fn _svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, 
IMM_ROTATION = 90))] +pub fn svcmla_u8( + op1: svuint8_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s8::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u64( + op1: svuint64_t, + 
op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Up convert long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")] + fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) + -> svfloat64_t; + } + unsafe { _svcvtlt_f64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Up convert long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvtlt_f64_f32_m(crate::intrinsics::transmute_unchecked(op), pg, op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")] + fn 
_svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtnt_f32_f64_m(even, pg.sve_into(), op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtnt_f32_f64_m(even, pg, op) +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")] + fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtx_f32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvtx_f32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")] + fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtxnt_f32_f64_m(even, pg.sve_into(), op) } +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtxnt_f32_f64_m(even, pg, op) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")] + fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + 
unsafe { _sveor3_s8(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + sveor3_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")] + fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _sveor3_s16(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + sveor3_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + 
unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")] + fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _sveor3_s32(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + sveor3_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")] + fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _sveor3_s64(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + sveor3_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + sveor3_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + sveor3_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + sveor3_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + sveor3_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn 
sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")] + fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveorbt_s8(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveorbt_s8(odd, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")] + fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveorbt_s16(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveorbt_s16(odd, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")] + fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveorbt_s32(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveorbt_s32(odd, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")] + fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveorbt_s64(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveorbt_s64(odd, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveorbt_u8(odd, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn 
sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveorbt_u16(odd, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveorbt_u32(odd, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: 
u64) -> svuint64_t { + sveorbt_u64(odd, op1, svdup_n_u64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")] + fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveortb_s8(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveortb_s8(even, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")] + fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveortb_s16(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveortb_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")] + fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveortb_s32(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveortb_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.eortb.nxv2i64")] + fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveortb_s64(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveortb_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveortb_u8(even, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: 
svuint16_t) -> svuint16_t { + unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveortb_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveortb_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { 
sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveortb_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")] + fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_s8_m(pg, op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, op1, op2) 
+} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")] + fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub 
fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")] + fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + 
svhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")] + fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} 
+#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")] + fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_z(pg: 
svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")] + fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_x(pg: svbool_t, 
op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")] + fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { 
_svhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")] + fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv4i32" + )] + fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhistcnt_s32_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv2i64" + )] + fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhistcnt_s64_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.histseg.nxv16i8" + )] + fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhistseg_s8(op1, op2).as_unsigned() } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")] + fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn 
svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")] + fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + 
unsafe { _svhsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")] + fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")] + fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")] + fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")] + fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")] + fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")] + fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.shsubr.nxv16i8")] + fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")] + fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")] + fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract 
reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")] + fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")] + fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { 
+ svhsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")] + fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_x(pg: svbool_t, op1: 
svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")] + fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving 
subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")] + fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_x(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64" + )] + fn _svldnt1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64" + )] + fn _svldnt1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, 
non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64" + )] + fn _svldnt1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64" + )] + fn _svldnt1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32" + )] + fn _svldnt1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldnt1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32" + )] + fn _svldnt1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldnt1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + 
offsets: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_f64(pg: 
svbool_t, bases: svuint64_t) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal 
accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"] +#[doc = "## 
Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn 
svldnt1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldnt1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldnt1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: 
i64, + ) -> svfloat64_t; + } + _svldnt1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldnt1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, 
offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn 
svldnt1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as 
ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sh_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast(_svldnt1sw_gather_s64index_s64( + pg.sve_into(), + base, + indices, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_u64( + pg: svbool_t, + base: 
*const i16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, 
non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_u32( + pg: svbool_t, + 
base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_s64( + pg: 
svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 
16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have 
special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + 
crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` 
cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_s64( + pg: svbool_t, + base: 
*const u32, + indices: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::scalable::sve_cast::( + _svldnt1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> 
svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit 
data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")] + fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svlogb_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svlogb_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svlogb_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")] + fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svlogb_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svlogb_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svlogb_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")] + fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svmatch_s8(pg, op1, op2) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")] + fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] 
+pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32" + )] + fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnmp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnmp_f32_m(pg, op1, op2) +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64" + )] + fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnmp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnmp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")] + fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxp_f32_m(pg, op1, op2) +} +#[doc = 
"Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")] + fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")] + fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_s8_m(pg, op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(smaxp))] +pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmaxp_s8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")] + fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmaxp_s16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")] + fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmaxp_s32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")] + fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmaxp_s64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")] + fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_u8_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmaxp_u8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")] + fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmaxp_u16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")] + fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmaxp_u32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")] + fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmaxp_u64_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv4f32" + )] + fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnmp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnmp_f32_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv2f64" + )] + fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnmp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_x(pg: svbool_t, 
op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnmp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")] + fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminp_f32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")] + fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")] + fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_s8_m(pg, op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svminp_s8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")] + fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum 
pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svminp_s16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")] + fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svminp_s32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")] + fn 
_svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svminp_s64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")] + fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svminp_u8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_m(pg: svbool_t, op1: 
svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")] + fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svminp_u16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")] + fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svminp_u32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")] + fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svminp_u64_m(pg, op1, op2) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv8i16" + )] + fn _svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv4i32" + )] + fn _svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv2i64" + )] + fn _svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { + 
svmla_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { + svmla_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { + svmla_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { 
_svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")] + fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s32(op1: svint32_t, op2: 
svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")] + fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")] + fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")] + fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")] + fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(umlalb))] +pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")] + fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32" + )] + fn _svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: 
i32, + ) -> svint32_t; + } + unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32" + )] + fn _svmlalt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")] + fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s32(op1: svint32_t, 
op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")] + fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")] + fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")] + fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")] + fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn 
svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")] + fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv8i16" + )] + fn _svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { 
_svmls_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv4i32" + )] + fn _svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv2i64" + )] + fn _svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u16( + op1: svuint16_t, + op2: 
svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { + svmls_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { + svmls_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { + svmls_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32" + )] + fn 
_svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")] + fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")] + fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")] + fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslb_s64(op1, op2, svdup_n_s32(op3)) 
+} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")] + fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")] + fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")] + fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + 
static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = 
"Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")] + fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")] + fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")] + fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(smlslt))] +pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")] + fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")] + fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} 
+#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")] + fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s16(op: svint8_t) -> svint16_t { + svshllb_n_s16::<0>(op) +} 
+#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s32(op: svint16_t) -> svint32_t { + svshllb_n_s32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s64(op: svint32_t) -> svint64_t { + svshllb_n_s64::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { + svshllb_n_u16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { + svshllb_n_u32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { + svshllb_n_u64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s16(op: svint8_t) -> svint16_t { + svshllt_n_s16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s32(op: svint16_t) -> svint32_t { + svshllt_n_s32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s64(op: svint32_t) -> svint64_t { + svshllt_n_s64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { + svshllt_n_u16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { + svshllt_n_u32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { + svshllt_n_u64::<0>(op) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32" + )] + fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64" + )] + fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv8i16" + )] + fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv4i32" + )] + fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv2i64" + )] + fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u16<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { svmul_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u32<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { svmul_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u64<const IMM_INDEX: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { svmul_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32" + )] + fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64" + )] + fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u32<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32" + )] + fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u64<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64" + )] + fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")] + fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_s16(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.smullb.nxv4i32")] + fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_s32(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")] + fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_s64(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern 
"unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")] + fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")] + fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")] + fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32" + )] + fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64" + )] + fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32" + )] + fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64" + )] + fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] + fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_s16(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] + fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_s32(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] + fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_s64(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] + fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] + fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] + fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn 
svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")] + fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svnbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svnbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")] + fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svnbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svnbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")] + fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svnbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svnbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")] + fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svnbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svnbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svnbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svnbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svnbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + 
svnbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")] + fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svnmatch_s8(pg, op1, op2) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")] + fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svnmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")] + fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmul_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8" + )] + fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullb_pair_u8(op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullb_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32" + )] + fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullb_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] 
+pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64" + )] + fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullb_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8" + )] + fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullt_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32" + )] + fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullt_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64" + )] + fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullt_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(pmullt))] +pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")] + fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqabs_s8_m(inactive, pg, op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> 
svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")] + fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")] + fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")] + fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")] + fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8_m(pg, op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + 
svqadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")] + fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = 
"Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")] + fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_z(pg: 
svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")] + fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t { + svqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")] + fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8_m(pg, 
op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")] + fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")] + fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = 
"Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")] + fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8" + )] + fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s16(op1: svint16_t, op2: svint16_t) -> 
svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16" + )] + fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32" + )] + fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64" + )] + fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32" + )] + fn _svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64" + )] + fn _svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe 
extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16" + )] + fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32" + )] + fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64" + )] + fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16" + )] + fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32" + )] + fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqdmlalbt.nxv2i64" + )] + fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32" + )] + fn _svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern 
"unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64" + )] + fn _svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16" + )] + fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32" + )] 
+ fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64" + )] + fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32" + )] + fn _svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64" + )] + fn _svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16" + )] + fn _svqdmlslb_s16(op1: 
svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32" + )] + fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64" + )] + fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16" + )] + fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn 
svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32" + )] + fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64" + )] + fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { 
_svqdmlslbt_s64(op1, op2, op3) }
}
#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslbt))]
pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
    svqdmlslbt_s64(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
pub fn svqdmlslt_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32"
        )]
        fn _svqdmlslt_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
pub fn svqdmlslt_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64"
        )]
        fn _svqdmlslt_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16"
        )]
        fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    unsafe { _svqdmlslt_s16(op1, op2, op3) }
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
    svqdmlslt_s16(op1, op2, svdup_n_s8(op3))
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32"
        )]
        fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    unsafe { _svqdmlslt_s32(op1, op2, op3) }
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
    svqdmlslt_s32(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64"
        )]
        fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    unsafe { _svqdmlslt_s64(op1, op2, op3) }
}
#[doc = "Saturating doubling multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlslt))]
pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
    svqdmlslt_s64(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
pub fn svqdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16"
        )]
        fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
    }
    unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
pub fn svqdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32"
        )]
        fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
    }
    unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
pub fn svqdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64"
        )]
        fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
    }
    unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8"
        )]
        fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqdmulh_s8(op1, op2) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    svqdmulh_s8(op1, svdup_n_s8(op2))
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16"
        )]
        fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqdmulh_s16(op1, op2) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    svqdmulh_s16(op1, svdup_n_s16(op2))
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32"
        )]
        fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqdmulh_s32(op1, op2) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    svqdmulh_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64"
        )]
        fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqdmulh_s64(op1, op2) }
}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    svqdmulh_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
pub fn svqdmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32"
        )]
        fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
pub fn svqdmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64"
        )]
        fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16"
        )]
        fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svqdmullb_s16(op1, op2) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    svqdmullb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32"
        )]
        fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svqdmullb_s32(op1, op2) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    svqdmullb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64"
        )]
        fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svqdmullb_s64(op1, op2) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    svqdmullb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
pub fn svqdmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32"
        )]
        fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
pub fn svqdmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64"
        )]
        fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16"
        )]
        fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svqdmullt_s16(op1, op2) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    svqdmullt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32"
        )]
        fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svqdmullt_s32(op1, op2) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    svqdmullt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64"
        )]
        fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svqdmullt_s64(op1, op2) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    svqdmullt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")]
        fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    unsafe { _svqneg_s8_m(inactive, pg, op) }
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    svqneg_s8_m(op, pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    svqneg_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")]
        fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svqneg_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    svqneg_s16_m(op, pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    svqneg_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")]
        fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svqneg_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svqneg_s32_m(op, pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svqneg_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")]
        fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svqneg_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svqneg_s64_m(op, pg, op)
}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svqneg_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16"
        )]
        fn _svqrdcmlah_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32"
        )]
        fn _svqrdcmlah_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>(
    op1: svint8_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint8_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8"
        )]
        fn _svqrdcmlah_s8(
            op1: svint8_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_rotation: i32,
        ) -> svint8_t;
    }
    unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16"
        )]
        fn _svqrdcmlah_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32"
        )]
        fn _svqrdcmlah_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64"
        )]
        fn _svqrdcmlah_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16"
        )]
        fn _svqrdmlah_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32"
        )]
        fn _svqrdmlah_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64"
        )]
        fn _svqrdmlah_lane_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8"
        )]
        fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svqrdmlah_s8(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svqrdmlah_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16"
        )]
        fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    unsafe { _svqrdmlah_s16(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svqrdmlah_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32"
        )]
        fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svqrdmlah_s32(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svqrdmlah_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64"
        )]
        fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmlah_s64(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svqrdmlah_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16"
        )]
        fn _svqrdmlsh_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
+) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32" + )] + fn _svqrdmlsh_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64" + )] + fn _svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8" + )] + fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlsh_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svqrdmlsh_s8(op1, op2, svdup_n_s8(op3))
}
// NOTE(review): generated wrappers — vector variants call the LLVM
// intrinsic directly; `_n_` variants broadcast the scalar with `svdup_n_*`.
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16"
        )]
        fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sound because the `sve,sve2` target features are enabled.
    unsafe { _svqrdmlsh_s16(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svqrdmlsh_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32"
        )]
        fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    unsafe { _svqrdmlsh_s32(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svqrdmlsh_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64"
        )]
        fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmlsh_s64(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svqrdmlsh_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
// FIX: the function used `IMM_INDEX` without declaring it; restore the
// `<const IMM_INDEX: i32>` parameter so the lane index is a compile-time
// immediate, as `assert_instr(..., IMM_INDEX = 0)` expects.
pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Lane index must address one of the 8 x 16-bit lanes of a 128-bit segment.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16"
        )]
        fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
    }
    unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
// FIX: restore the missing `<const IMM_INDEX: i32>` const generic.
pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32"
        )]
        fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
    }
    unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
// FIX: restore the missing `<const IMM_INDEX: i32>` const generic.
pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64"
        )]
        fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
    }
    unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8"
        )]
        fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqrdmulh_s8(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    svqrdmulh_s8(op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16"
        )]
        fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqrdmulh_s16(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    svqrdmulh_s16(op1, svdup_n_s16(op2))
}
// NOTE(review): generated wrappers — the `_n_` variants broadcast the
// scalar with `svdup_n_*` and delegate to the vector variant.
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32"
        )]
        fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sound because the `sve,sve2` target features are enabled.
    unsafe { _svqrdmulh_s32(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    svqrdmulh_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64"
        )]
        fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqrdmulh_s64(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    svqrdmulh_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")]
        fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // No predicate conversion here: the 8-bit element form takes `svbool_t`
    // directly (16-lane predicate), unlike the wider-element variants below.
    unsafe { _svqrshl_s8_m(pg, op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test,
assert_instr(sqrshl))]
pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svqrshl_s8_m(pg, op1, op2)
}
// NOTE(review): generated predication variants — `_x` ("don't care")
// delegates to `_m` (merging); `_z` (zeroing) first zeroes inactive lanes
// of op1 via `svsel_*` before calling `_m`.
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqrshl_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")]
        fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // `pg.sve_into()` narrows the all-lanes predicate to the 8-lane
    // predicate type the 16-bit-element intrinsic expects.
    unsafe { _svqrshl_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqrshl_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqrshl_s16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqrshl_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
+#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")] + fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + 
svqrshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")] + fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t; + } + unsafe { _svqrshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqrshl_s64_z(pg, op1, svdup_n_s64(op2))
}
// NOTE(review): generated unsigned `svqrshl` variants — the shift amount
// (op2) stays signed; unsigned data is round-tripped through the signed
// intrinsic signature with `as_signed()`/`as_unsigned()`.
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")]
        fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sound because the `sve,sve2` target features are enabled.
    unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqrshl_u8_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqrshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")]
        fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqrshl_u16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqrshl_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")]
        fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqrshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqrshl_u32_m(pg, op1, op2)
}
#[doc =
"Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")] + fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svqrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_s32(even: svint16_t, op1: 
svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> 
svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16" + )] + fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32" + )] + fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64" + )] + fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16" + )] + fn _svqrshrunt_n_s16(even: svint8_t, op1: 
svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32" + )] + fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64" + )] + fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_m(pg: svbool_t, 
op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")] + fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, svsel_s8(pg, op1, 
svdup_n_s8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")] + fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating 
shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")] + fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift 
left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")] + fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(sqshl))]
+pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqshl_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqshl_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")]
+        fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqshl_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")]
+        fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqshl_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")]
+        fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqshl_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")]
+        fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature =
"stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 0..=7);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
+        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0..=15);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
+        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshlu_n_s16_m(pg.sve_into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0..=31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
+        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshlu_n_s32_m(pg.sve_into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0..=63);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")]
+        fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svqshlu_n_s64_m(pg.sve_into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    svqshlu_n_s64_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    svqshlu_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16"
+        )]
+        fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32"
+        )]
+        fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64"
+        )]
+        fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16"
+        )]
+        fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32"
+        )]
+        fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64"
+        )]
+        fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16"
+        )]
+        fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32"
+        )]
+        fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16"
+        )]
+        fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32"
+        )]
+        fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable =
"sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16"
+        )]
+        fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32"
+        )]
+        fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64"
+        )]
+        fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16"
+        )]
+        fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32"
+        )]
+        fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64"
+        )]
+        fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")]
+        fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsub_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")]
+        fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsub_s16_m(pg.sve_into(), op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s16_m(pg: svbool_t, op1:
svint16_t, op2: i16) -> svint16_t { + svqsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")] + fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")] + fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")] + fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = 
"Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")] + fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, op1, op2) +} +#[doc = 
"Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")] + fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32_m(pg.sve_into(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")] + fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")] + fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_z(pg: svbool_t, op1: 
svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")] + fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_x(pg: svbool_t, 
op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")] + fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] 
+pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")] + fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")] + fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_m(pg, op1, svdup_n_u8(op2)) 
+} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")] + fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")] + fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + 
svqsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")] + fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} 
+#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")] + fn _svqxtnb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_s16(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")] 
+ fn _svqxtnb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_s32(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s64(op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")] + fn _svqxtnb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_s64(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")] + fn _svqxtnb_u16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")] + fn _svqxtnb_u32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")] + fn _svqxtnb_u64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")] + fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_s16(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")] + fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_s32(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")] + fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_s64(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")] + fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")] + fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")] + fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16" + )] + fn _svqxtunb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunb_s16(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32" + )] + fn _svqxtunb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunb_s32(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { + 
unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64" + )] + fn _svqxtunb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunb_s64(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16" + )] + fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32" + )] + fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64" + )] + fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv8i16" + )] + fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnb_s16(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svraddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv4i32" + )] + fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnb_s32(op1, op2) } +} +#[doc = "Rounding add narrow high part 
(bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svraddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv2i64" + )] + fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnb_s64(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svraddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add 
narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv8i16" + )] + fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnt_s16(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svraddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = 
"aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv4i32" + )] + fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnt_s32(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svraddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv2i64" + )] + fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnt_s64(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svraddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")] + fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrax1_s64(op1, op2) } +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")] + fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(op, pg, op) +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.srhadd.nxv16i8")] + fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_s8_m(pg, op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")] + fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")] + fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")] + fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")] + fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")] + fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = 
"Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")] + fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { 
+ svrhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")] + fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")] + fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_s8_m(pg, op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] 
+pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")] + fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_z(pg: svbool_t, op1: 
svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")] + fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: 
svint32_t) -> svint32_t { + svrshl_s32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")] + fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { 
_svrshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")] + fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")] + fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} 
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")]
+        fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svrshl_u32_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")]
+        fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svrshl_u64_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")]
+        fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshr_n_s8_m(pg, op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")]
+        fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_s16_m(pg.sve_into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")]
+        fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshr_n_s32_m(pg.sve_into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svrshr_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svrshr_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    static_assert_range!(IMM2, 1..=64);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")]
+        fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svrshr_n_s64_m(pg.sve_into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svrshr_n_s64_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svrshr_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_m<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1..=8);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")]
+        fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_x<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_z<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, svsel_u8(pg, op1, svdup_n_u8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_m<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1..=16);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")]
+        fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_u16_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_x<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_z<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, svsel_u16(pg, op1, svdup_n_u16(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_m<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1..=32);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")]
+        fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshr_n_u32_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_x<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_z<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, svsel_u32(pg, op1, svdup_n_u32(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_m<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 1..=64);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")]
+        fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svrshr_n_u64_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_x<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    svrshr_n_u64_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_z(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, svsel_u64(pg, op1, svdup_n_u64(0))) +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")] + fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")] + fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")] + fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svrshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svrshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svrshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")] + fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")] + fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")] + fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { 
_svrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svrshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svrshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svrshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_m(inactive: 
svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ursqrte.nxv4i32" + )] + fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(op, pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")] + fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")] + fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")] + fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")] + fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_s64(op1, op2, IMM3) } +} 
+#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")] + fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")] + fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ursra.nxv4i32")] + fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")] + fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16" + )] + fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnb_s16(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnb_s16(op1, 
svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32" + )] + fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnb_s32(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64" + )] + fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnb_s64(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn 
svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16" + )] + fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnt_s16(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32" + )] + fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnt_s32(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64" + )] + fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { 
_svrsubhnt_s64(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.sbclb.nxv4i32")] + fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")] + fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"] 
+#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")] + fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")] + fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sbclt))]
pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svsbclt_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")]
        fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svshllb_n_s16(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")]
        fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svshllb_n_s32(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")]
        fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svshllb_n_s64(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")]
        fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")]
        fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")]
        fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")]
        fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svshllt_n_s16(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")]
        fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svshllt_n_s32(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")]
        fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svshllt_n_s64(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")]
        fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")]
        fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")]
        fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")]
        fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    unsafe { _svshrnb_n_s16(op1, IMM2) }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")]
        fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    unsafe { _svshrnb_n_s32(op1, IMM2) }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")]
        fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    unsafe { _svshrnb_n_s64(op1, IMM2) }
+} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.shrnt.nxv8i16")] + fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")] + fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")] + fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { 
svshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")] + fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsli_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")] + fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsli_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")] + fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsli_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")] + fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsli_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe { svsli_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe { svsli_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe { svsli_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe { svsli_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 encryption and decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4e))] +pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")] + fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 key updates"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4ekey))] +pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")] + fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")] + fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + 
svsqadd_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")] + fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsqadd_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn 
svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")] + fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsqadd_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> 
svuint32_t { + svsqadd_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")] + fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsqadd_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub 
fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")] + fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")] + fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")] + fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")] + fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")] + fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")] + fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")] + fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")] + fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")] + fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsri_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")] + fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsri_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right 
and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")] + fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsri_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")] + fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsri_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svsri_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svsri_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svsri_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svsri_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64" + )] + fn _svstnt1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64" + )] + fn _svstnt1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: 
*mut i64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64" + )] + fn _svstnt1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64" + )] + fn _svstnt1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32" + )] + fn _svstnt1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32" + )] + fn _svstnt1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` 
cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} 
+#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have 
special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: 
i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_u64( + pg: 
svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8" + )] + fn _svstnt1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svstnt1b_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16" + )] + fn _svstnt1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svstnt1h_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: 
svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32" + )] + fn _svstnt1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svstnt1w_scatter_s64offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8" + )] + fn _svstnt1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svstnt1b_scatter_u32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16" + )] + fn _svstnt1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svstnt1h_scatter_u32offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} 
+#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svstnt1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * 
Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svstnt1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1h_scatter_u32base_offset_s32( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svstnt1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1b_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svstnt1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1h_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn 
svstnt1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svstnt1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1w_scatter_u64base_offset_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit 
barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, 
0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_s64( + pg: svbool_t, + base: 
*mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16" + )] + fn _svstnt1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svstnt1h_scatter_s64index_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32" + )] + fn _svstnt1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svstnt1w_scatter_s64index_s64( + crate::intrinsics::simd::scalable::sve_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_u64( + 
pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + 
svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} 
+#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * 
Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")] + fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnb_s16(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] 
+pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")] + fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnb_s32(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")] + fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnb_s64(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u64(op1: svuint64_t, op2: 
svuint64_t) -> svuint32_t { + unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")] + fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnt_s16(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn 
svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")] + fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnt_s32(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")] + fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnt_s64(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")] + fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_s16(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")] + fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_s32(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")] + fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_s64(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublb_s64(op1, 
svdup_n_s32(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")] + fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")] + fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")] + fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv8i16" + )] + fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublbt_s16(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv4i32" + )] + fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublbt_s32(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv2i64" + )] + fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> 
svint64_t; + } + unsafe { _svsublbt_s64(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")] + fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_s16(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")] + 
fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_s32(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")] + fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_s64(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.usublt.nxv8i16")] + fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")] + fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn 
svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")] + fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv8i16" + )] + fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubltb_s16(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsubltb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] +#[inline(always)] +#[target_feature(enable = 
"sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv4i32" + )] + fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubltb_s32(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsubltb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv2i64" + )] + fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubltb_s64(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsubltb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")] + fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_s16(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")] + fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_s32(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwb_s32(op1, svdup_n_s16(op2)) +} 
// NOTE(review): this section is machine-generated (see the file header);
// the `//` comments below only annotate the repeating patterns — the code
// itself is unchanged. Three patterns recur:
//   * a thin shim declaring the LLVM SVE2 intrinsic via `link_name` and
//     calling it directly;
//   * an `_n` form that broadcasts its scalar operand with `svdup_n_*`
//     and defers to the vector form;
//   * unsigned variants that reinterpret lanes with `as_signed()` /
//     `as_unsigned()` because the imported symbol is declared with signed
//     vector types.
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic. The `extern "unadjusted"` ABI is
    // used for every scalable-vector FFI declaration in this generated file —
    // presumably so SVE register values are passed through unmodified; the
    // authoritative rationale lives in the generator, not here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")]
        fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svsubwb_s64(op1, op2) }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    // `_n` form: broadcast the scalar and reuse the vector implementation.
    svsubwb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")]
        fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // The imported symbol is declared on signed vector types, so unsigned
    // lanes are reinterpreted on the way in and back on the way out.
    unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    svsubwb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")]
        fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    svsubwb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")]
        fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    svsubwb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")]
        fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svsubwt_s16(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
    svsubwt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")]
        fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svsubwt_s32(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
    svsubwt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")]
        fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svsubwt_s64(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    svsubwt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")]
        fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    svsubwt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")]
        fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    svsubwt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")]
        fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    svsubwt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")]
        fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t;
    }
    // The intrinsic takes the table as two separate vectors, so the x2 tuple
    // is split with svget2; indices are reinterpreted to match the signed
    // FFI signature.
    unsafe {
        _svtbl2_f32(
            svget2_f32::<0>(data),
            svget2_f32::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")]
        fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    unsafe {
        _svtbl2_f64(
            svget2_f64::<0>(data),
            svget2_f64::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")]
        fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t;
    }
    unsafe {
        _svtbl2_s8(
            svget2_s8::<0>(data),
            svget2_s8::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")]
        fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t;
    }
    unsafe {
        _svtbl2_s16(
            svget2_s16::<0>(data),
            svget2_s16::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")]
        fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe {
        _svtbl2_s32(
            svget2_s32::<0>(data),
            svget2_s32::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")]
        fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe {
        _svtbl2_s64(
            svget2_s64::<0>(data),
            svget2_s64::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t {
    // Unsigned variant is a reinterpret-cast wrapper over the signed one.
    unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t {
    unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t {
    unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")]
        fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
    }
    unsafe { _svtbx_f32(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")]
        fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    unsafe { _svtbx_f64(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")]
        fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t;
    }
    unsafe { _svtbx_s8(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")]
        fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t;
    }
    unsafe { _svtbx_s16(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")]
        fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe { _svtbx_s32(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")]
        fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe { _svtbx_s64(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t {
    unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t {
    unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
+// The punpkhi intrinsic returns the width-specific predicate type svbool8_t;
+// sve_into() converts it back to the generic svbool_t exposed by this API.
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(punpkhi))]
+pub fn svunpkhi_b(op: svbool_t) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.punpkhi.nxv16i1"
+        )]
+        fn _svunpkhi_b(op: svbool_t) -> svbool8_t;
+    }
+    unsafe { _svunpkhi_b(op).sve_into() }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s16(op: svint8_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16"
+        )]
+        fn _svunpkhi_s16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpkhi_s16(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s32(op: svint16_t) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32"
+        )]
+        fn _svunpkhi_s32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpkhi_s32(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s64(op: svint32_t) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64"
+        )]
+        fn _svunpkhi_s64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpkhi_s64(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16"
+        )]
+        fn _svunpkhi_u16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() }
+}
+#[doc
= "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32"
+        )]
+        fn _svunpkhi_u32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64"
+        )]
+        fn _svunpkhi_u64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() }
+}
+// svunpklo_* mirror svunpkhi_* but bind the *unpklo LLVM intrinsics (low half).
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(punpklo))]
+pub fn svunpklo_b(op: svbool_t) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.punpklo.nxv16i1"
+        )]
+        fn _svunpklo_b(op: svbool_t) -> svbool8_t;
+    }
+    unsafe { _svunpklo_b(op).sve_into() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s16(op: svint8_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv8i16"
+        )]
+        fn _svunpklo_s16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpklo_s16(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s32(op: svint16_t) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv4i32"
+        )]
+        fn _svunpklo_s32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpklo_s32(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s64(op: svint32_t) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv2i64"
+        )]
+        fn _svunpklo_s64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpklo_s64(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv8i16"
+        )]
+        fn _svunpklo_u16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv4i32"
+        )]
+        fn _svunpklo_u32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv2i64"
+        )]
+        fn _svunpklo_u64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")]
+        fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: 
svint8_t) -> svint8_t;
+    }
+    unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) }
+}
+// Convention visible in this family: _m calls the intrinsic directly (merging),
+// _x delegates to _m, and _z first zeroes inactive lanes of op1 via svsel before _m;
+// the _n_* scalar forms splat the scalar with svdup and defer to the vector form.
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svuqadd_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")]
+        fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuqadd_s16_m(pg.sve_into(), op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svuqadd_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")]
+        fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe 
{ _svuqadd_s32_m(pg.sve_into(), op1, op2.as_signed()) }
+}
+// 32- and 64-bit svuqadd variants follow the same _m/_x/_z pattern as the
+// 8/16-bit forms; the intrinsic takes the width-specific predicate (sve_into).
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svuqadd_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")]
+        fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuqadd_s64_m(pg.sve_into(), op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svuqadd_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32"
+        )]
+        fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { 
_svwhilege_b8_s32(op1, op2) }
+}
+// Signed svwhilege_* bind the `whilege` intrinsics; the unsigned variants further
+// below bind `whilehs` (unsigned >=) and reinterpret their operands with as_signed().
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32"
+        )]
+        fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32"
+        )]
+        fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32"
+        )]
+        fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64"
+        )]
+        fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_s64(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64"
+        )]
+        fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64"
+        )]
+        fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64"
+        )]
+        fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32"
+        )]
+        fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32"
+        )]
+        fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32"
+        )]
+        fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32"
+        )]
+        fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64"
+        )]
+        fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64"
+        )]
+        fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64"
+        )]
+        fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64"
+        )]
+        fn 
_svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+// Signed svwhilegt_* bind the `whilegt` intrinsics; the unsigned variants below
+// bind `whilehi` (unsigned >) and reinterpret their operands with as_signed().
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32"
+        )]
+        fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_s32(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32"
+        )]
+        fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32"
+        )]
+        fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32"
+        )]
+        fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_s32(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64"
+        )]
+        fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_s64(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64"
+        )]
+        fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64"
+        )]
+        fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64"
+        )]
+        fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_s64(op1, op2).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32"
+        )]
+        fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32"
+        )]
+        fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32"
+        )]
+        fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"]
+#[inline(always)]
+#[target_feature(enable = "sve,sve2")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32"
+        )]
+        fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
+}
+#[doc = "While 
decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64" + )] + fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64" + )] + fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64" + )] + fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = 
"While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64" + )] + fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0" + )] + fn _svwhilerw_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilerw_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0" + )] + fn _svwhilerw_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilerw_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = 
op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0" + )] + fn _svwhilerw_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilerw_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0" + )] + fn _svwhilerw_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilerw_64ptr(op1, op2).sve_into() +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s32(op1: 
*const i32, op2: *const i32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write 
conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0" + )] + fn _svwhilewr_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilewr_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const 
crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0" + )] + fn _svwhilewr_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilewr_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0" + )] + fn _svwhilewr_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilewr_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0" + )] + fn _svwhilewr_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilewr_64ptr(op1, op2).sve_into() +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, 
assert_instr(whilewr))] +pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { + 
svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] +#[inline(always)] 
+#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")] + fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svxar_n_s8(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")] + fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svxar_n_s16(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")] + fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svxar_n_s64(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svxar_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svxar_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub 
fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svxar_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svxar_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs b/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs new file mode 100644 index 0000000000..2ec3ad6a5d --- /dev/null +++ b/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs @@ -0,0 +1,2482 @@ +// This code is automatically generated. DO NOT MODIFY. 
+// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly 
initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + 
assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + 
let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_f64_with_svstnt1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn 
test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_f64_with_svstnt1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + 
assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_f32_with_svstnt1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_f32(svptrue_b32(), 
storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_f64_with_svstnt1_scatter_u64offset_f64() { + let mut storage 
= [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + 
assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_f64_with_svstnt1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = 
svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_f32_with_svstnt1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_s32( + svptrue_b32(), + 
bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_f64_with_svstnt1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn 
test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_f32_with_svstnt1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + 
svstnt1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, 
+ svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_f64_with_svstnt1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_offset_u64() { + let mut 
storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_s64(svptrue_b16(), 
storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), 
offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = 
svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), 
+ ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + 
svldnt1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + 
svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i8; 1280usize]; // scratch buffer; the scatter writes only a VL-dependent prefix
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); // lane i holds i + 1
+    let bases = svindex_u32(0, 1u32.try_into().unwrap()); // per-lane 32-bit base addresses 0, 1, 2, ...
+    svstnt1b_scatter_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64, // shared byte offset of one element, so storage[j] == j after the scatter
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8); // each slot is either untouched (0) or holds its own index
+    }
+    svsetffr(); // set all FFR bits -- NOTE(review): ldnt1 gathers are not first-faulting; presumably a generator-template artifact, confirm in the spec
+    let loaded = svldnt1sb_gather_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_u32( // round-trip: the gather must reproduce the scattered values exactly
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i16; 640usize]; // same round-trip as above, with 16-bit storage elements
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets); // per-lane absolute addresses: buffer base + element byte offsets
+    svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); // base-only form takes no extra offset, so data starts at 0
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_s64(svptrue_b8(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_s64(svptrue_b16(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_s64(svptrue_b32(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_u64(svptrue_b8(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_u64(svptrue_b16(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_u64(svptrue_b32(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1); // element indices (scaled by the element size in the instruction), not byte offsets
+    svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
+    let mut storage = [0 as u16; 640usize]; // NOTE(review): values stay far below i16::MAX here, so sign- vs zero-extension is indistinguishable in this test
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1, // shared index is in elements, so the base address is divided by the element size
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); // index of 1 element on top of the per-lane bases
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + 
svldnt1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + 
svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() { 
+ let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as 
u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = 
svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( 
+ loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = 
svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() 
{ + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_s64(svptrue_b32(), 
bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() { + let mut storage = [0 as 
i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 
1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_s64(svptrue_b16(), 
storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 
0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() { + 
let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + 
svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} diff --git a/crates/core_arch/src/aarch64/sve2/mod.rs b/crates/core_arch/src/aarch64/sve2/mod.rs new file mode 100644 index 0000000000..acf9070214 --- /dev/null +++ b/crates/core_arch/src/aarch64/sve2/mod.rs @@ -0,0 +1,17 @@ +//! 
SVE2 intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use super::sve::*; +use crate::intrinsics::*; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs index 9255994e5e..f2f19eba26 100644 --- a/crates/core_arch/src/lib.rs +++ b/crates/core_arch/src/lib.rs @@ -40,7 +40,8 @@ const_cmp, const_eval_select, maybe_uninit_as_bytes, - movrs_target_feature + movrs_target_feature, + min_adt_const_params )] #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))] #![deny(clippy::missing_inline_in_public_items)] diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs index 00e92428b3..83039bc65a 100644 --- a/crates/core_arch/src/macros.rs +++ b/crates/core_arch/src/macros.rs @@ -14,6 +14,22 @@ macro_rules! static_assert { }; } +#[allow(unused_macros)] +macro_rules! static_assert_range { + ($imm:ident, $min:literal..=$max:literal) => { + static_assert!( + $min <= $imm && $imm <= $max, + concat!( + stringify!($imm), + " is not in range ", + stringify!($min), + "-", + stringify!($max), + ) + ) + }; +} + #[allow(unused_macros)] macro_rules! 
static_assert_uimm_bits { ($imm:ident, $bits:expr) => { diff --git a/crates/intrinsic-test/src/arm/json_parser.rs b/crates/intrinsic-test/src/arm/json_parser.rs index 65c179ef0d..c1563a7364 100644 --- a/crates/intrinsic-test/src/arm/json_parser.rs +++ b/crates/intrinsic-test/src/arm/json_parser.rs @@ -12,6 +12,8 @@ use std::path::Path; #[serde(deny_unknown_fields)] struct ReturnType { value: String, + #[serde(rename = "element_bit_size")] + _element_bit_size: Option, } #[derive(Deserialize, Debug)] @@ -50,6 +52,8 @@ struct JsonIntrinsic { args_prep: Option>, #[serde(rename = "Architectures")] architectures: Vec, + #[serde(rename = "instructions")] + _instructions: Option>>, } pub fn get_neon_intrinsics( diff --git a/crates/stdarch-gen-arm/README.md b/crates/stdarch-gen-arm/README.md index 64f1183f1d..970721681c 100644 --- a/crates/stdarch-gen-arm/README.md +++ b/crates/stdarch-gen-arm/README.md @@ -205,9 +205,6 @@ MatchKind: - `Array` - An array of expressions - Usage: `Array: [, ...]` -- `SvUndef` - - Returns the LLVM `undef` symbol - - Usage: `SvUndef` - `Multiply` - Simply `*` - Usage: `Multiply: [, ]` diff --git a/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml new file mode 100644 index 0000000000..f136bd3d67 --- /dev/null +++ b/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -0,0 +1,5211 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve] + llvm_prefix: llvm.aarch64.sve + +uses_neon_types: true +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svacge[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facge] + 
n_variant_op: op2 + compose: + - LLVMLink: { name: "facge.{sve_type}" } + + - name: svacgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "facgt.{sve_type}" } + + - name: svacle[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facge] + n_variant_op: op2 + compose: + - FnCall: ["svacge_{type}", [$pg, $op2, $op1]] + + - name: svaclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - FnCall: ["svacgt_{type}", [$pg, $op2, $op1]] + + - name: svcadd[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[fcadd, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcadd.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $IMM_ROTATION]] + + - name: svcmla[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] 
}] + assert_instr: [[fcmla, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcmla.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[fcmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: fcmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.f}add"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}add.{sve_type}" } + + - name: svqsub[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.x.{sve_type}" } + + - name: svcnt[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count nonzero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + 
return_type: "{sve_type[1]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [cnt] + compose: + - LLVMLink: { name: "cnt.{sve_type[0]}" } + + - name: svcls[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading sign bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [i16, u16], [i32, u32], [i64, u64]] + zeroing_method: { drop: inactive } + assert_instr: [cls] + compose: + - LLVMLink: { name: "cls.{sve_type[0]}" } + + - name: svclz[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading zero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [clz] + compose: + - LLVMLink: { name: "clz.{sve_type[0]}" } + + - name: svext{size_literal[1]}[_{type[0]}]{_mxz} + attr: [*sve-unstable] + substitutions: + sign_or_zero: + match_kind: "{type[0]}" + default: Sign + unsigned: Zero + kind_literal: { match_kind: "{type[0]}", default: s, unsigned: u } + doc: "{sign_or_zero}-extend the low {size[1]} bits" + arguments: + ["inactive: {sve_type[0]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + zeroing_method: { drop: inactive } + assert_instr: ["{type_kind[0].su}xt{size_literal[1]}"] + compose: + - LLVMLink: + name: "{type_kind[0].su}xt{size_literal[1]}.{sve_type[0]}" + + - name: svsqrt[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Square root + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + 
types: [f32, f64] + zeroing_method: { drop: inactive } + assert_instr: [fsqrt] + compose: + - LLVMLink: { name: "fsqrt.{sve_type}" } + + - name: svcmpeq[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmeq, default: cmpeq }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpeq.{sve_type}" } + + - name: svcmpeq_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + assert_instr: [cmpeq] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpeq.wide.{sve_type[0]}" } + + - name: svcmpge[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpge.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphs.{sve_type}" } + + - name: svcmpge_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + n_variant_op: op2 + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmpge, unsigned: cmphs }] + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpge.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphs.wide.{sve_type[0]}" } + + - name: svcmpgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: ["pg: {predicate}", "op1: 
{sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpgt.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphi.{sve_type}" } + + - name: svcmpgt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpgt.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphi.wide.{sve_type[0]}" } + + - name: svcmple[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpge_{type}", [$pg, $op2, $op1]] + + - name: svcmple_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmple, unsigned: cmpls }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmple.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmpls.wide.{sve_type[0]}" } + + - name: svcmplt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, 
u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpgt_{type}", [$pg, $op2, $op1]] + + - name: svcmplt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmplt, unsigned: cmplo }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmplt.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmplo.wide.{sve_type[0]}" } + + - name: svcmpne[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmne, default: cmpne }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpne.{sve_type}" } + + - name: svcmpne_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: [[[i8, i16, i32], i64]] + assert_instr: [cmpne] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpne.wide.{sve_type[0]}" } + + - name: svcmpuo[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare unordered with + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [fcmuo] + n_variant_op: op2 + compose: + - LLVMLink: { name: "fcmpuo.{sve_type}" } + + - name: svcnt{size_literal} + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + return_type: u64 + types: [i8, i16, i32, i64] + assert_instr: + - default: { byte: rdvl, halfword: cnth, default: cntw, doubleword: cntd } + compose: + - FnCall: 
["svcnt{size_literal}_pat", [], ["{{ svpattern::SV_ALL }}"]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: + - [rdvl, "PATTERN = {{ svpattern::SV_ALL }}"] + - ["cnt{size_literal}", "PATTERN = {{ svpattern::SV_MUL4 }}"] + types: [i8] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: [["cnt{size_literal}", "PATTERN = {{ svpattern::SV_ALL }}"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svlen[_{type}] + attr: [*sve-unstable] + doc: Count the number of elements in a full vector + arguments: ["_op: {sve_type}"] + return_type: "u64" + types: [i8, u8, i16, u16, i32, u32, f32, i64, u64, f64] + assert_instr: [{ default: { default: "cnt{size_literal}", byte: rdvl } }] + compose: + - FnCall: ["svcnt{size_literal}", []] + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup[_n]_{type}{_mxz} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.{sve_type}" } + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: 
Broadcast a scalar value + arguments: ["op: bool"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [sbfx, whilelo] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["data: {sve_type[0]}", "index: {type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - FnCall: + - svtbl_{type[0]} + - - $data + - FnCall: ["svdup_n_{type[1]}", [$index]] + + - name: svdupq_lane[_{type}] + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["data: {sve_type}", "index: u64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [tbl] + compose: + - LLVMLink: { name: "dupq.lane.{sve_type}" } + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + - "x8: {type}" + - "x9: {type}" + - "x10: {type}" + - "x11: {type}" + - "x12: {type}" + - "x13: {type}" + - "x14: {type}" + - "x15: {type}" + return_type: "{sve_type}" + types: [i8, u8] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - - $x0 + - $x1 + - $x2 + - $x3 + - $x4 + - $x5 + - $x6 + - $x7 + - $x8 + - $x9 + - $x10 + - $x11 + - $x12 + - $x13 + - $x14 + - $x15 + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b8] + 
arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + - "x8: bool" + - "x9: bool" + - "x10: bool" + - "x11: bool" + - "x12: bool" + - "x13: bool" + - "x14: bool" + - "x15: bool" + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s8 + - - CastAs: [$x0, i8] + - CastAs: [$x1, i8] + - CastAs: [$x2, i8] + - CastAs: [$x3, i8] + - CastAs: [$x4, i8] + - CastAs: [$x5, i8] + - CastAs: [$x6, i8] + - CastAs: [$x7, i8] + - CastAs: [$x8, i8] + - CastAs: [$x9, i8] + - CastAs: [$x10, i8] + - CastAs: [$x11, i8] + - CastAs: [$x12, i8] + - CastAs: [$x13, i8] + - CastAs: [$x14, i8] + - CastAs: [$x15, i8] + - FnCall: + - svcmpne_wide_s8 + - - FnCall: [svptrue_b8, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + return_type: "{sve_type}" + types: [i16, u16] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - [$x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b16] + arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + return_type: svbool_t + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s16 + - - CastAs: [$x0, i16] + - CastAs: [$x1, i16] + - CastAs: [$x2, i16] + - CastAs: [$x3, i16] + - CastAs: [$x4, i16] + - CastAs: [$x5, i16] + - 
CastAs: [$x6, i16] + - CastAs: [$x7, i16] + - FnCall: + - svcmpne_wide_s16 + - - FnCall: [svptrue_b16, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}", "x2: {type}", "x3: {type}"] + return_type: "{sve_type}" + types: [f32, i32, u32] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1, $x2, $x3]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b32] + arguments: ["x0: bool", "x1: bool", "x2: bool", "x3: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s32 + - - CastAs: [$x0, i32] + - CastAs: [$x1, i32] + - CastAs: [$x2, i32] + - CastAs: [$x3, i32] + - FnCall: + - svcmpne_wide_s32 + - - FnCall: [svptrue_b32, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}"] + return_type: "{sve_type}" + types: [f64, i64, u64] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b64] + arguments: ["x0: bool", "x1: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: 
[svdupq_n_s64, [CastAs: [$x0, i64], CastAs: [$x1, i64]]] + - FnCall: + - svcmpne_s64 + - - FnCall: [svptrue_b64, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svcreate2[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of two vectors + arguments: ["x0: {sve_type}", "x1: {sve_type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create2", [$x0, $x1], [], true] + + - name: svcreate3[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of three vectors + arguments: ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create3", [$x0, $x1, $x2], [], true] + + - name: svcreate4[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of four vectors + arguments: + ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}", "x3: {sve_type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create4", [$x0, $x1, $x2, $x3], [], true] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + 
return_type: "{sve_type_x2}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + return_type: "{sve_type_x2}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef3_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef3_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four 
vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svindex_{type} + attr: [*sve-unstable] + doc: Create linear series + arguments: ["base: {type}", "step: {type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [index] + compose: + - LLVMLink: { name: "index.{sve_type}" } + + - name: svget2[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of two vectors + arguments: ["tuple: {sve_type_x2}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget3[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of three vectors + arguments: ["tuple: {sve_type_x3}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget4[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of four vectors + arguments: ["tuple: {sve_type_x4}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + 
+ - name: svset2[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of two vectors + arguments: ["tuple: {sve_type_x2}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset3[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of three vectors + arguments: ["tuple: {sve_type_x3}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset4[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of four vectors + arguments: ["tuple: {sve_type_x4}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svzip1[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1_{type} + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, 
b64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from low halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1q.{sve_type}" } + + - name: svzip2[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2_{type} + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from high halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2q.{sve_type}" } + + - name: svuzp1[_{type}] + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1.{sve_type}" } + + - name: svuzp1_{type} + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: 
"uzp1.{sve_type}" } + + - name: svuzp1q[_{type}] + attr: [*sve-unstable] + doc: Concatenate even quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1q.{sve_type}" } + + - name: svuzp2[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2_{type} + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2q[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2q.{sve_type}" } + + - name: svtrn1[_{type}] + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1_{type} + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1q[_{type}] + attr: [*sve-unstable] + doc: Interleave even quadwords from two 
inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1q.{sve_type}" } + + - name: svtrn2[_{type}] + attr: [*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2_{type} + attr: [*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2q[_{type}] + attr: [*sve-unstable] + doc: Interleave odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2q.{sve_type}" } + + - name: svrev[_{type}] + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrev_{type} + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrevb[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bytes within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revb] + 
compose: + - LLVMLink: { name: "revb.{sve_type}" } + + - name: svrevh[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse halfwords within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i32, i64, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revh] + compose: + - LLVMLink: { name: "revh.{sve_type}" } + + - name: svrevw[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse words within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i64, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revw] + compose: + - LLVMLink: { name: "revw.{sve_type}" } + + - name: svrbit[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bits + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [rbit] + compose: + - LLVMLink: { name: "rbit.{sve_type}" } + + - name: svext[_{type}] + attr: [*sve-unstable] + doc: Extract vector from pair of vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, sve_max_elems_type: "{type}" }] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[ext, "IMM3 = 1"]] + compose: + - LLVMLink: + name: ext.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsplice[_{type}] + attr: [*sve-unstable] + doc: Splice two vectors under predicate control + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [splice] + compose: + - LLVMLink: { name: "splice.{sve_type}" } + + - name: svinsr[_n_{type}] + attr: 
[*sve-unstable] + doc: Insert scalar in shifted vector + arguments: ["op1: {sve_type}", "op2: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [insr] + compose: + - LLVMLink: { name: "insr.{sve_type}" } + + - name: svld1[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1.{sve_type}" } + + - name: svld1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1]}" + + - name: svld1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: 
predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1]}" + + - name: svld1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svld1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - 
"svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svld1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - 
name: svld1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + 
safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - 
$bases + - 0 + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1[_{type}] + attr: [*sve-unstable] + 
doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnt1.{sve_type}" } + + - name: svldnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svld1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - 
[[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svld1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld2[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated 
+ - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld2{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld2.sret.{sve_type}" } + + - name: svld2_vnum[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld2{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld3[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld3.sret.{sve_type}" } + + - name: svld3_vnum[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld4[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + 
safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld4.sret.{sve_type}" } + + - name: svld4_vnum[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1rq[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 128 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1rq{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1rq.{sve_type}" } + + - name: svld1ro[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 256 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + target_features: [f64mm] + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1ro{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1ro.{sve_type}" } + + - name: svldnf1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - 
unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnf1.{sve_type}" } + + - name: svldnf1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnf1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svldnf1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: 
{predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldnf1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: 
+ [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldff1.{sve_type}" } + + - name: svldff1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldff1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, 
$base]] + + - name: svldff1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldff1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: 
"{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1]}" + + - name: svldff1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1]}" + + - name: svldff1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: 
predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldff1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - 
unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svldff1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: 
predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svldff1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: 
predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::scalable::sve_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + doc: Load {size[2]}-bit data and sign-extend, first-faulting + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, 
i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", 
"index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svrdffr_z + attr: [*sve-unstable] + doc: Read FFR, returning predicate of successfully loaded elements + arguments: ["pg: svbool_t"] + return_type: svbool_t + assert_instr: [rdffr] + compose: + - LLVMLink: { name: "rdffr.z" } + + - name: svrdffr + attr: [*sve-unstable] + doc: Read FFR, returning predicate of successfully loaded elements + arguments: [] + return_type: svbool_t + assert_instr: [rdffr] + compose: + - FnCall: [svrdffr_z, [FnCall: [svptrue_b8, []]]] + + - name: svsetffr + attr: [*sve-unstable] + doc: Initialize the first-fault register to all-true + arguments: [] + assert_instr: [setffr] + compose: + - LLVMLink: { name: "setffr" } + + - name: svwrffr + attr: [*sve-unstable] + doc: Write to the first-fault register + arguments: ["op: svbool_t"] + assert_instr: [wrffr] + compose: + - LLVMLink: { name: "wrffr" } + + - name: svqinc{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qinc{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqinc{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: 
byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qinc{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qinc{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqinc{size_literal}[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qinc{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqinc{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qinc{size_literal}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qinc{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", 
"pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qdec{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqdec{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqdec{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qdec{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qdec{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal}[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: 
[i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qdec{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqdec{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqdec{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qdec{size_literal}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qdec{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svst1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svst1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] 
+ test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svst1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svst1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - 
"bases: {sve_type[0]}" + - "offset: i64" + name: "st1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svst1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], i32, i16] + - [[i32, u32], u32, u16] + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + 
arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $base, $indices] + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], i32, [i8, i16]] + - [[i32, u32], u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $base, $offsets] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, 
[i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "st1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $bases, $offset] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svstnt1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *mut {type}", "data: 
{sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "stnt1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svstnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svstnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1{size_literal[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *mut {type[1]}", "data: {sve_type[0]}"] + types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "st1.{sve_type[0] as {type[1]}}" + arguments: + - "data: {sve_type[0] as {type[1]}}" + - "pg: {predicate[0]}" + - "ptr: *mut {type[1]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $base] + + - name: svst1{size_literal[1]}_vnum[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "vnum: i64" + - "data: {sve_type[0]}" + 
types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst2[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x2}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st2.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type}", ["$data"], [0]] + - FnCall: ["svget2_{type}", ["$data"], [1]] + - "$pg" + - "$base" + + - name: svst2_vnum[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, 
u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst3[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x3}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st3.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget3_{type}", ["$data"], [0]] + - FnCall: ["svget3_{type}", ["$data"], [1]] + - FnCall: ["svget3_{type}", ["$data"], [2]] + - "$pg" + - "$base" + + - name: svst3_vnum[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst4[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x4}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st4{size_literal}"] + 
test: { store: 0 } + compose: + - LLVMLink: + name: "st4.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "data3: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget4_{type}", ["$data"], [0]] + - FnCall: ["svget4_{type}", ["$data"], [1]] + - FnCall: ["svget4_{type}", ["$data"], [2]] + - FnCall: ["svget4_{type}", ["$data"], [3]] + - "$pg" + - "$base" + + - name: svst4_vnum[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st4{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svtbl[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table + arguments: ["data: {sve_type[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [tbl] + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + compose: + - LLVMLink: { name: "tbl.{sve_type[0]}" } + + - name: svwhilele_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilele, unsigned: whilels }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilele.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilels.{sve_type[1]}.{type[0]}" } } 
+ + - name: svwhilelt_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilelt, unsigned: whilelo }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilelt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilelo.{sve_type[1]}.{type[0]}" } } + + - name: svmax[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind}max"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}max.{sve_type}" } + + - name: svmaxnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fmaxnm] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}maxnm.{sve_type}" } + + - name: svpfalse[_b] + attr: [*sve-unstable] + doc: Set all predicate elements to false + arguments: [] + return_type: "svbool_t" + assert_instr: [pfalse] + compose: + - FnCall: + - "svdupq_n_b8" + - - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + + - name: svptrue_pat_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [[ptrue, "PATTERN = {{svpattern::SV_ALL}}"]] + compose: + - LLVMLink: + name: ptrue.{sve_type} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - 
name: svptrue_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + return_type: "svbool_t" + types: [b8, b16, b32, b64] + assert_instr: [ptrue] + compose: + - FnCall: ["svptrue_pat_{type}", [], ["{{svpattern::SV_ALL}}"]] + + - name: svptest_any + attr: [*sve-unstable] + doc: Test whether any active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.any.nxv16i1" } + + - name: svptest_first + attr: [*sve-unstable] + doc: Test whether first active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.first.nxv16i1" } + + - name: svptest_last + attr: [*sve-unstable] + doc: Test whether last active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.last.nxv16i1" } + + - name: svpfirst[_b] + attr: [*sve-unstable] + doc: Set the first active predicate element to true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [pfirst] + compose: + - LLVMLink: { name: "pfirst.nxv16i1" } + + - name: svpnext_{type} + attr: [*sve-unstable] + doc: Find next active predicate + arguments: ["pg: {predicate}", "op: {predicate}"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [pnext] + compose: + - LLVMLink: { name: "pnext.{sve_type}" } + + - name: svbrkn[_b]_z + attr: [*sve-unstable] + doc: Propagate break to next partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkn] + compose: + - LLVMLink: { name: "brkn.z.nxv16i1" } + + - name: svbrkb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: 
"brkb.z.nxv16i1" } + + - name: svbrkb[_b]_m + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: "brkb.nxv16i1" } + + - name: svbrkpb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpb] + compose: + - LLVMLink: { name: "brkpb.z.nxv16i1" } + + - name: svbrka[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.z.nxv16i1" } + + - name: svbrka[_b]_m + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.nxv16i1" } + + - name: svbrkpa[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpa] + compose: + - LLVMLink: { name: "brkpa.z.nxv16i1" } + + - name: svsel[_b] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [sel] + compose: + - FnCall: ["simd_select", [$pg, $op1, $op2]] + + - name: svsel[_{type}] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [sel] + compose: + - FnCall: + - "simd_select" + - - MatchSize: + - "{type}" + - { default: { MethodCall: [$pg, sve_into, []] }, 
byte: $pg } + - $op1 + - $op2 + - - MatchSize: + - "{type}" + - byte: svbool_t + halfword: svbool8_t + default: svbool4_t + doubleword: svbool2_t + - _ + + - name: svsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: ["{type_kind.f}sub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}sub.{sve_type}" } + + - name: svsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: ["{type_kind.f}subr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}subr.{sve_type}" } + + - name: svcntp_{predicate} + attr: [*sve-unstable] + doc: Count set predicate bits + arguments: ["pg: {predicate}", "op: {predicate}"] + types: [b8, b16, b32, b64] + return_type: u64 + assert_instr: [cntp] + compose: + - LLVMLink: { name: "cntp.{predicate}" } + + - name: svcompact[_{type}] + attr: [*sve-unstable] + doc: Shuffle active elements of vector to the right and fill with zero + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: [compact] + compose: + - LLVMLink: { name: "compact.{sve_type}" } + + - name: svlasta[_{type}] + attr: [*sve-unstable] + doc: Extract element after last + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lasta] + compose: + - LLVMLink: { name: "lasta.{sve_type}" } + + - name: svclasta[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {sve_type}", 
"data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.{sve_type}" } + + - name: svclasta[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.n.{sve_type}" } + + - name: svlastb[_{type}] + attr: [*sve-unstable] + doc: Extract last element + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lastb] + compose: + - LLVMLink: { name: "lastb.{sve_type}" } + + - name: svclastb[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.{sve_type}" } + + - name: svclastb[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.n.{sve_type}" } + + - name: svqdecp[_{type}] + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind.su}qdecp.{sve_type}" } + + - name: svqdecp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + 
return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: ["{type_kind[0].su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qdecp.n{size[0]}.{sve_type[1]}" } + + - name: svqincp[_{type}] + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qincp"] + compose: + - LLVMLink: { name: "{type_kind.su}qincp.{sve_type}" } + + - name: svqincp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: ["{type_kind[0].su}qincp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qincp.n{size[0]}.{sve_type[1]}" } + + - name: svtmad[_{type}] + attr: [*sve-unstable] + doc: Trigonometric multiply-add coefficient + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: [0, 7] }] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [[ftmad, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "ftmad.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: + - "{llvm_link}" + - [op1, op2, IMM3] + + - name: svtsmul[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric starting value + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftsmul] + compose: + - LLVMLink: + name: "ftsmul.x.{sve_type[0]}" + + - name: svtssel[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric select coefficient + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftssel] + compose: + - LLVMLink: + name: 
"ftssel.x.{sve_type[0]}" + + - name: svprf{size_literal} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf.{sve_type}" + arguments: + ["pg: {predicate}", "base: *crate::ffi::c_void", "op: svprfop"] + - FnCall: + - "{llvm_link}" + - - $pg + - CastAs: [$base, "*const crate::ffi::c_void"] + - $OP + + - name: svprf{size_literal}_vnum + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset_vnum: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T", "vnum: i64"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - FnCall: + - "svprf{size_literal}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - - $OP + - _ + + - name: svprf{size_literal[1]}_gather_[{type[0]}]{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + indices_or_offsets: + { match_size: "{type[1]}", default: "indices", byte: "offsets" } + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[i32, u32, i64, u64], 
[i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "base: *T", "{indices_or_offsets}: {sve_type[0]}"] + static_defs: ["const OP: svprfop", T] + assert_instr: + [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]] + test: { load: 0 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "prf{size_literal[1]}.gather.{type_kind[0].su}xtw.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + doubleword: + LLVMLink: + name: "prf{size_literal[1]}.gather.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + - CastAs: [$base, "*const crate::ffi::c_void"] + - "${indices_or_offsets}" + - $OP + + - name: svprf{size_literal[1]}_gather[_{type[0]}base] + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "op: svprfop" + - FnCall: ["{llvm_link}", [$pg, $bases, 0, $OP]] + + - name: svprf{size_literal[1]}_gather[_{type[0]}base]_{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + textual_size: + match_size: 
"{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "{index_or_offset}: i64"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prfb", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "{index_or_offset}: i64" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + - $bases + - MatchSize: + - "{type[1]}" + - byte: $offset + halfword: { MethodCall: [$index, unchecked_shl, [1]] } + default: { MethodCall: [$index, unchecked_shl, [2]] } + doubleword: { MethodCall: [$index, unchecked_shl, [3]] } + - $OP + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[f32, f64], [i32, u32, i64, u64]] + zeroing_method: { drop: inactive } + substitutions: + convert_from: { match_kind: "{type[1]}", default: s, unsigned: u } + assert_instr: ["{convert_from}cvtf"] + compose: + - MatchSize: + - "{type[0]}" + - default: + MatchSize: + - "{type[1]}" + - default: + LLVMLink: + name: "{convert_from}cvtf.{sve_type[0]}.{sve_type[1]}" + doubleword: + LLVMLink: + name: "{convert_from}cvtf.{type[0]}{type[1]}" + doubleword: + LLVMLink: + name: "{convert_from}cvtf.{sve_type[0]}.{sve_type[1]}" + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i32, u32, i64, u64], [f32, f64]] + zeroing_method: { drop: inactive } + substitutions: + convert_to: { match_kind: "{type[0]}", default: s, unsigned: u } + 
assert_instr: ["fcvtz{convert_to}"] + compose: + - LLVMLink: { name: "fcvtz{convert_to}.{type[0]}{type[1]}" } + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64], [f64, f32]] + zeroing_method: { drop: inactive } + assert_instr: [fcvt] + compose: + - LLVMLink: { name: "fcvt.{type[0]}{type[1]}" } + + - name: svreinterpret_{type[0]}[_{type[1]}] + attr: [*sve-unstable] + doc: Reinterpret vector contents + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [] + types: + - - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - FnCall: ["crate::intrinsics::transmute_unchecked", [$op], [], true] + + - name: svrinta[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties away from zero + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinta] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinta.{sve_type}" } + + - name: svrinti[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (inexact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinti] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinti.{sve_type}" } + + - name: svrintm[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards -∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintm] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintm.{sve_type}" } + + - name: svrintn[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties to even + 
arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintn] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintn.{sve_type}" } + + - name: svrintp[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards +∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintp] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintp.{sve_type}" } + + - name: svrintx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (exact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintx.{sve_type}" } + + - name: svrintz[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards zero + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintz] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintz.{sve_type}" } + + - name: svabd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute difference + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f64, f32, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}abd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind}abd.{sve_type}" } + + - name: svabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}abs"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}abs.{sve_type}" } + + - 
name: svand[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [and] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "and.{sve_type}" } + + - name: svandv[_{type}] + attr: [*sve-unstable] + doc: Bitwise AND reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [andv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "andv.{sve_type}" } + + - name: svand[_b]_z + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [and] + compose: + - LLVMLink: { name: "and.z.nxv16i1" } + + - name: svmov[_b]_z + attr: [*sve-unstable] + doc: Move + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: svbool_t + assert_instr: [mov] + compose: + - FnCall: ["svand_b_z", [$pg, $op, $op]] + + - name: svbic[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bic] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "bic.{sve_type}" } + + - name: svbic[_b]_z + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [bic] + compose: + - LLVMLink: { name: "bic.z.nxv16i1" } + + - name: sveor[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { 
name: "eor.{sve_type}" } + + - name: sveorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [eorv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "eorv.{sve_type}" } + + - name: sveor[_b]_z + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [eor] + compose: + - LLVMLink: { name: "eor.z.nxv16i1" } + + - name: svnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [not] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "not.{sve_type}" } + + - name: svnot[_b]_z + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: svbool_t + assert_instr: [not] + compose: + - FnCall: ["sveor_b_z", [$pg, $op, $pg]] + + - name: svcnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logically invert boolean condition + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [cnot] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "cnot.{sve_type}" } + + - name: svnand[_b]_z + attr: [*sve-unstable] + doc: Bitwise NAND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nand] + compose: + - LLVMLink: { name: "nand.z.nxv16i1" } + + - name: svnor[_b]_z + attr: [*sve-unstable] + doc: Bitwise NOR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nor] + compose: + - LLVMLink: { name: "nor.z.nxv16i1" } + + - name: svorr[{_n}_{type}]{_mxz} + attr: 
[*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [orr] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "orr.{sve_type}" } + + - name: svorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise inclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [orv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "orv.{sve_type}" } + + - name: svorr[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [orr] + compose: + - LLVMLink: { name: "orr.z.nxv16i1" } + + - name: svorn[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR, inverting second argument + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [orn] + compose: + - LLVMLink: { name: "orn.z.nxv16i1" } + + - name: svlsl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], u8] + - [[i16, u16], u16] + - [[i32, u32], u32] + - [[i64, u64], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.{sve_type[0]}" } + + - name: svlsl_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32, u8, u16, u32], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.wide.{sve_type[0]}" } + + - name: svasr[{_n}_{type[0]}]{_mxz} + attr: 
[*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.{sve_type[0]}" } + + - name: svasr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32], u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.wide.{sve_type[0]}" } + + - name: svasrd[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right for divide by immediate + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64] + assert_instr: [[asrd, "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "asrd.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svlsr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsr.{sve_type}" } + + - name: svlsr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[u8, u16, u32], u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: 
"lsr.wide.{sve_type[0]}" } + + - name: svadda[_{type}] + attr: [*sve-unstable] + doc: Add reduction (strictly-ordered) + arguments: ["pg: {predicate}", "initial: {type}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [fadda] + types: [f32, f64] + compose: + - LLVMLink: { name: "fadda.{sve_type}" } + + - name: svaddv[_{type}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i64, u64] + assert_instr: [{ float: faddv, default: uaddv }] + compose: + - LLVMLink: { name: "{type_kind.fsu}addv.{sve_type}" } + + - name: svaddv[_{type[0]}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{type[1]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: ["{type_kind[0].su}addv"] + compose: + - LLVMLink: { name: "{type_kind[0].su}addv.{sve_type[0]}" } + + - name: svmaxv[_{type}] + attr: [*sve-unstable] + doc: Maximum reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxv.{sve_type}" } + + - name: svmaxnmv[_{type}] + attr: [*sve-unstable] + doc: Maximum number reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fmaxnmv] + compose: + - LLVMLink: { name: "fmaxnmv.{sve_type}" } + + - name: svminv[_{type}] + attr: [*sve-unstable] + doc: Minimum reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minv.{sve_type}" } + + - name: svminnmv[_{type}] + attr: [*sve-unstable] + doc: Minimum number reduction to scalar + arguments: ["pg: 
{predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fminnmv] + compose: + - LLVMLink: { name: "fminnmv.{sve_type}" } + + - name: svmul[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.f}mul"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}mul.{sve_type}" } + + - name: svmulh[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply, returning high-half + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.su}mulh"] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}mulh.{sve_type}" } + + - name: svmulx[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply extended (∞×0=2) + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["fmulx"] + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fmulx.{sve_type}" } + + - name: svrecpe[_{type}] + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecpe] + compose: + - LLVMLink: { name: "frecpe.x.{sve_type}" } + + - name: svrecps[_{type}] + attr: [*sve-unstable] + doc: Reciprocal step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecps] + compose: + - LLVMLink: { name: "frecps.x.{sve_type}" } + + - name: svrecpx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal exponent + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: 
[f32, f64] + assert_instr: [frecpx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frecpx.x.{sve_type}" } + + - name: svrsqrte[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrte] + compose: + - LLVMLink: { name: "frsqrte.x.{sve_type}" } + + - name: svrsqrts[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrts] + compose: + - LLVMLink: { name: "frsqrts.x.{sve_type}" } + + - name: svmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mad"] + compose: + - LLVMLink: { name: "{type_kind.f}mad.{sve_type}" } + + - name: svmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mla"] + compose: + - LLVMLink: { name: "{type_kind.f}mla.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - 
"op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mls"] + compose: + - LLVMLink: { name: "{type_kind.f}mls.{sve_type}" } + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}msb"] + compose: + - LLVMLink: { name: "{type_kind.f}msb.{sve_type}" } + + - name: svnmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmad] + compose: + - LLVMLink: { name: "fnmad.{sve_type}" } + + - 
name: svnmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmla] + compose: + - LLVMLink: { name: "fnmla.{sve_type}" } + + - name: svnmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmls] + compose: + - LLVMLink: { name: "fnmls.{sve_type}" } + + - name: svnmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmsb] + compose: + - LLVMLink: { name: "fnmsb.{sve_type}" } + + - name: svneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}neg"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}neg.{sve_type}" } + + - name: svqadd[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating add + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.x.{sve_type}" } + + - name: svadr{size_literal[2]}[_{type[0]}base]_[{type[1]}]{index_or_offset} + attr: [*sve-unstable] + substitutions: + index_or_offset: { 
match_size: "{type[2]}", default: index, byte: offset } + indices_or_offsets: + { match_size: "{type[2]}", default: indices, byte: offsets } + doc: Compute vector addresses for {size[2]}-bit data + arguments: ["bases: {sve_type[0]}", "{indices_or_offsets}: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u32, [i32, u32], [i8, i16, i32, i64]] + - [u64, [i64, u64], [i8, i16, i32, i64]] + assert_instr: [adr] + compose: + - LLVMLink: { name: "adr{size_literal[2]}.{sve_type[0]}" } + + - name: svdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: ["{type_kind[0].su}dot"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}dot.{sve_type[0]}" } + + - name: svdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: [["{type_kind[0].su}dot", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}dot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svusdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - LLVMLink: { name: "usdot.{sve_type[0]}" } + + - name: svusdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot 
product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, u8, i8]] + assert_instr: [[usdot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "usdot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svsudot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8, u8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - FnCall: ["svusdot_{type[0]}", [$op1, $op3, $op2]] + + - name: svsudot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, i8, u8]] + assert_instr: [[sudot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sudot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svdiv[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}div"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}div.{sve_type}" } + + - name: 
svdivr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}divr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}divr.{sve_type}" } + + - name: svexpa[_{type[0]}] + attr: [*sve-unstable] + doc: Floating-point exponential accelerator + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, u32], [f64, u64]] + assert_instr: [fexpa] + compose: + - LLVMLink: { name: "fexpa.x.{sve_type[0]} " } + + - name: svscale[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Adjust exponent + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [fscale] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fscale.{sve_type[0]}" } + + - name: svmmla[_{type}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f32mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f64] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [u32, u8]] + assert_instr: ["{type_kind[0].su}mmla"] + compose: + - LLVMLink: { name: 
"{type_kind[0].su}mmla.{sve_type[0]}" } + + - name: svusmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usmmla] + compose: + - LLVMLink: { name: "usmmla.{sve_type[0]}" } + + - name: svmin[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.fsu}min"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}min.{sve_type}" } + + - name: svminnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fminnm] + n_variant_op: op2 + compose: + - LLVMLink: { name: "fminnm.{sve_type}" } diff --git a/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml new file mode 100644 index 0000000000..cfd5f5380f --- /dev/null +++ b/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml @@ -0,0 +1,3196 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve, sve2] + llvm_prefix: llvm.aarch64.sve + +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svbext[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Gather lower bits from positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + 
assert_instr: [bext] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bext.x.{sve_type}" } + + - name: svbgrp[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Group bits to right or left as selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bgrp] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bgrp.x.{sve_type}" } + + - name: svbdep[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Scatter lower bits into positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bdep] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bdep.x.{sve_type}" } + + - name: svhistcnt[_{type[0]}]_z + attr: [*sve-unstable] + doc: Count matching elements + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i32, u32], [i64, u64], [u32, u32], [u64, u64]] + assert_instr: [histcnt] + compose: + - LLVMLink: { name: "histcnt.{sve_type[0]}" } + + - name: svhistseg[_{type[0]}] + attr: [*sve-unstable] + doc: Count matching elements in 128-bit segments + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [u8, u8]] + assert_instr: [histseg] + compose: + - LLVMLink: { name: "histseg.{sve_type[0]}" } + + - name: svmatch[_{type}] + attr: [*sve-unstable] + doc: Detect any matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: [match] + compose: + - LLVMLink: { name: "match.{sve_type}" } + + - name: svnmatch[_{type}] + attr: [*sve-unstable] + doc: Detect no matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: 
[nmatch] + compose: + - LLVMLink: { name: "nmatch.{sve_type}" } + + - name: svhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}hadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hadd.{sve_type}" } + + - name: svrhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}rhadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}rhadd.{sve_type}" } + + - name: svaddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnb.{sve_type[0]}" } + + - name: svaddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnt.{sve_type[0]}" } + + - name: svraddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnb.{sve_type[0]}" 
} + + - name: svraddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnt.{sve_type[0]}" } + + - name: svcadd[_{type}] + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[cadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cadd.x.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_ROTATION]] + + - name: svcdot[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cdot, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - { variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" } + - { variable: IMM_ROTATION, any_values: [0, 
90, 180, 270] } + assert_instr: [[cdot, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.lane.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svcmla[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cmla, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, u16, u32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[cmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqrdcmlah[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] 
+ static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[sqrdcmlah, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svqrdcmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[sqrdcmlah, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqcadd[_{type}] + attr: [*sve-unstable] + doc: Saturating complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: "IMM_ROTATION", any_values: [90, 270] }] + assert_instr: [[sqcadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: "sqcadd.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", ["$op1", "$op2", "$IMM_ROTATION"]] + + - name: svsublb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - 
[u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublb"] + n_variant_op: op2 + compose: + - LLVMLink: + name: "{type_kind[0].su}sublb.{sve_type[0]}" + + - name: svsublbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom - top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssublbt] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssublbt.{sve_type[0]}" + + - name: svsublt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}sublt.{sve_type[0]}" } + + - name: svsubltb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top - bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssubltb] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssubltb.{sve_type[0]}" + + - name: svsubwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwb.{sve_type[0]}" } + + - name: svsubwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwt"] + 
n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwt.{sve_type[0]}" } + + - name: svrsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnb.{sve_type[0]}" } + + - name: svrsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnt.{sve_type[0]}" } + + - name: svsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnb.{sve_type[0]}" } + + - name: svsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnt.{sve_type[0]}" } + + - name: svsbclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclb] + n_variant_op: op3 + 
compose: + - LLVMLink: { name: "sbclb.{sve_type}" } + + - name: svsbclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sbclt.{sve_type}" } + + - name: svqsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.{sve_type}" } + + - name: svqsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}qsubr"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsubr.{sve_type}" } + + - name: svhsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsub.{sve_type}" } + + - name: svhsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsubr.{sve_type}" } + + - name: 
svwhilege_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilege, unsigned: whilehs }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilege.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehs.{sve_type[1]}.{type[0]}" } } + + - name: svwhilegt_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilegt, unsigned: whilehi }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilegt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehi.{sve_type[1]}.{type[0]}" } } + + - name: svwhilerw_{size}ptr + attr: [*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilerw.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilerw[_{type}] + attr: [*sve-unstable] + doc: While free of read-after-write conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. 
+ safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." + arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilerw] + compose: + - FnCall: + - "svwhilerw_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svwhilewr_{size}ptr + attr: [*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilewr.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilewr[_{type}] + attr: [*sve-unstable] + doc: While free of write-after-read conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. + safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." 
+ arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilewr] + compose: + - FnCall: + - "svwhilewr_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svtbl2[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in two-vector table + arguments: ["data: {sve_type_x2[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - LLVMLink: + name: "tbl2.{sve_type[0]}" + arguments: + - "data0: {sve_type[0]}" + - "data1: {sve_type[0]}" + - "indices: {sve_type[1]}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type[0]}", ["$data"], [0]] + - FnCall: ["svget2_{type[0]}", ["$data"], [1]] + - $indices + + - name: svtbx[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table (merging) + arguments: + - "fallback: {sve_type[0]}" + - "data: {sve_type[0]}" + - "indices: {sve_type[1]}" + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbx] + compose: + - LLVMLink: { name: "tbx.{sve_type[0]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_m + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - LLVMLink: { name: "fcvtlt.{type[0]}{type[1]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_x + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: ["pg: svbool_t", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - FnCall: + - "svcvtlt_{type[0]}_{type[1]}_m" + - - FnCall: 
["crate::intrinsics::transmute_unchecked", [$op], [], true] + - $pg + - $op + + - name: svcvtnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert and narrow (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtnt] + compose: + - LLVMLink: { name: "fcvtnt.{type[0]}{type[1]}" } + + - name: svcvtx_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Down convert, rounding to odd + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + zeroing_method: { drop: inactive } + assert_instr: [fcvtx] + compose: + - LLVMLink: { name: "fcvtx.{type[0]}{type[1]}" } + + - name: svcvtxnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert, rounding to odd (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtxnt] + compose: + - LLVMLink: { name: "fcvtxnt.{type[0]}{type[1]}" } + + - name: svldnt1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: { name: "ldnt1.gather.index.{sve_type[1]}" } + + - name: svldnt1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, 
u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: { name: "ldnt1.gather.uxtw.{sve_type[1]}" } + doubleword: + LLVMLink: { name: "ldnt1.gather.{sve_type[1]}" } + + - name: svldnt1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldnt1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, 
["{size_in_bytes_log2[0]}"]] + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: 
["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: 
"ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::scalable::sve_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: 
["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svstnt1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: 
{predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svstnt1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svstnt1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "bases: 
{sve_type[0]}" + - "offset: i64" + name: "stnt1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svstnt1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - 
"data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $base, $indices] + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, i32, [i8, i16]] + - [u32, u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $base, $offsets] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: 
"stnt1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::scalable::sve_cast", [$data]], $pg, $bases, $offset] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svaba[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute difference and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}aba"] + n_variant_op: op3 + compose: + - LLVMLink: { name: 
"{type_kind}aba.{sve_type}" } + + - name: svqabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqabs] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqabs.{sve_type}" } + + - name: svabdlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlb.{sve_type[0]}" } + + - name: svabdlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlt.{sve_type[0]}" } + + - name: svabalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}abalb.{sve_type[0]}" } + + - name: svabalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: 
["{type_kind[0].su}abalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}abalt.{sve_type[0]}" } + + - name: svbcax[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise clear and exclusive OR + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bcax] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bcax.{sve_type}" } + + - name: sveorbt[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (bottom, top) + arguments: ["odd: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eorbt] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eorbt.{sve_type}" } + + - name: sveortb[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (top, bottom) + arguments: ["even: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eortb] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eortb.{sve_type}" } + + - name: sveor3[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR of three vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor3] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "eor3.{sve_type}" } + + - name: svbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl.{sve_type}" } + + - name: svbsl1n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with first input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + 
return_type: "{sve_type}" + assert_instr: [bsl1n] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl1n.{sve_type}" } + + - name: svbsl2n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with second input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl2n] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl2n.{sve_type}" } + + - name: svnbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [nbsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "nbsl.{sve_type}" } + + - name: svxar[_n_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR and rotate right + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + assert_instr: [[xar, "IMM3 = 1"]] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: + name: "xar.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrax1[_{type}] + attr: [*sve-unstable] + doc: Bitwise rotate left by 1 and exclusive OR + target_features: [sve2-sha3] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [rax1] + types: [i64, u64] + compose: + - LLVMLink: { name: "rax1" } + + - name: svshllb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (bottom) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - 
[u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllb", "IMM2 = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}shllb.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svshllt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (top) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllt", "IMM2 = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}shllt.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}rshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}rshl.{sve_type[0]}" } + + - name: svqrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}qrshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qrshl.{sve_type[0]}" } + + - name: svqshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - 
[[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}qshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qshl.{sve_type[0]}" } + + - name: svqshlu[_n_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left unsigned + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [[sqshlu, "IMM2 = 0"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "sqshlu.{sve_type[0]}" + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svsli[_n_{type}] + attr: [*sve-unstable] + doc: Shift left and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["0", "{size_minus_one}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sli, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "sli.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshr[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift right + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rshr", "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "{type_kind.su}rshr.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svrsra[_n_{type}] + attr: 
[*sve-unstable] + doc: Rounding shift right and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rsra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}rsra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, 
i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qrshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qrshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + 
- FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: 
"{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svsra[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}sra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}sra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsri[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sri, "IMM3 = 1"]] + compose: + - LLVMLink: + name: "sri.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: 
svshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqxtnb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnb"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnb.{sve_type[0]}" } + + - name: svqxtnt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnt"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnt.{sve_type[0]}" } + + - name: svqxtunb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunb] + compose: + - LLVMLink: { name: "sqxtunb.{sve_type[0]}" } + + - name: svqxtunt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunt] + compose: + - LLVMLink: { name: "sqxtunt.{sve_type[0]}" } + 
+ - name: svmovlb[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (bottom) + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllb"] + compose: + - FnCall: ["svshllb_n_{type[0]}", [$op], [0]] + + - name: svmovlt[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (top) + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllt"] + compose: + - FnCall: ["svshllt_n_{type[0]}", [$op], [0]] + + - name: svunpkhi[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpkhi"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpkhi.{sve_type[0]}" } + + - name: svunpkhi[_b] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpkhi] + compose: + - LLVMLink: { name: "punpkhi.nxv16i1" } + + - name: svunpklo[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpklo"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpklo.{sve_type[0]}" } + + - name: svunpklo[_b] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpklo] + compose: + - LLVMLink: { name: "punpklo.nxv16i1" } + + - name: svaddp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Add pairwise + arguments: ["pg: {predicate}", "op1: 
{sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.f}addp"] + compose: + - LLVMLink: { name: "{type_kind.f}addp.{sve_type}" } + + - name: svadalp[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Add and accumulate long pairwise + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}adalp"] + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "{type_kind[0].su}adalp.{sve_type[0]}" } + + - name: svmaxp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxp.{sve_type}" } + + - name: svmaxnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fmaxnmp"] + compose: + - LLVMLink: { name: "fmaxnmp.{sve_type}" } + + - name: svminp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minp.{sve_type}" } + + - name: svminnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fminnmp"] + compose: + - LLVMLink: { name: "fminnmp.{sve_type}" } + + - name: svmul_lane[_{type}] + 
attr: [*sve-unstable] + doc: Multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["{type_kind.f}mul", "IMM_INDEX = 0"]] + types: [f32, f64, i16, i32, i64, u16, u32, u64] + compose: + - LLVMLink: + name: "{type_kind.f}mul.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmulh.{sve_type}" } + + - name: svqdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["sqdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: "sqdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqrdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqrdmulh.{sve_type}" } + + - name: svqrdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: 
"{type}" }] + assert_instr: [["sqrdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: "sqrdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullb.{sve_type[0]}" } + + - name: svqdmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullb", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: "sqdmullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullt.{sve_type[0]}" } + + - name: svqdmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullt", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: 
"sqdmullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" } + + - name: svmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" } + + - name: svmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: 
[["{type_kind[0].su}mullt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svrecpe[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [urecpe] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "urecpe.{sve_type}" } + + - name: svrsqrte[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [ursqrte] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "ursqrte.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: 
{sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalb.{sve_type[0]}" } + + - name: svmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalt.{sve_type[0]}" } + + - name: svmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, 
i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslb.{sve_type[0]}" } + + - name: svmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslt.{sve_type[0]}" } + + - name: svmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: 
+ ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlah[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlah] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlah.{sve_type}" } + + - name: svqrdmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlah, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlah.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlsh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlsh] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlsh.{sve_type}" } + + - name: svqrdmlsh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling 
multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlsh, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlsh.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalb.{sve_type[0]}" } + + - name: svqdmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalbt.{sve_type[0]}" } + + - name: svqdmlalt[{_n}_{type[0]}] + 
attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalt.{sve_type[0]}" } + + - name: svqdmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslb.{sve_type[0]}" } + + - name: svqdmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: 
["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlslbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslbt.{sve_type[0]}" } + + - name: svqdmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslt.{sve_type[0]}" } + + - name: svqdmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqneg] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqneg.{sve_type}" } + + - name: svadclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: 
"{sve_type}" + types: [u32, u64] + assert_instr: [adclb] + n_variant_op: op3 + compose: + - LLVMLink: { name: "adclb.{sve_type}" } + + - name: svadclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [adclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "adclt.{sve_type}" } + + - name: svqadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.{sve_type}" } + + - name: svsqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with signed addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u8, i8] + - [u16, i16] + - [u32, i32] + - [u64, i64] + assert_instr: [usqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "usqadd.{sve_type[0]}" } + + - name: svuqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with unsigned addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [suqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "suqadd.{sve_type[0]}" } + + - name: svaddlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlb"] + 
n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlb.{sve_type[0]}" } + + - name: svaddlbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom + top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: ["{type_kind[0].su}addlbt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlbt.{sve_type[0]}" } + + - name: svaddlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlt.{sve_type[0]}" } + + - name: svaddwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwb.{sve_type[0]}" } + + - name: svaddwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwt.{sve_type[0]}" } + + - name: svlogb[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Base 2 logarithm as integer + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [flogb] + zeroing_method: { drop: inactive } + 
compose: + - LLVMLink: { name: "flogb.{sve_type[0]}" } + + - name: svpmul[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [pmul] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmul.{sve_type}" } + + - name: svpmullb_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullb.pair.{sve_type}" } + + - name: svpmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullb_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svpmullt_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullt.pair.{sve_type}" } + + - name: svpmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullt_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svaesd[_{type}] + attr: [*sve-unstable] + doc: AES single round decryption + target_features: 
[sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesd] + compose: + - LLVMLink: { name: "aesd" } + + - name: svaese[_{type}] + attr: [*sve-unstable] + doc: AES single round encryption + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aese] + compose: + - LLVMLink: { name: "aese" } + + - name: svaesmc[_{type}] + attr: [*sve-unstable] + doc: AES mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesmc] + compose: + - LLVMLink: { name: "aesmc" } + + - name: svaesimc[_{type}] + attr: [*sve-unstable] + doc: AES inverse mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesimc] + compose: + - LLVMLink: { name: "aesimc" } + + - name: svsm4e[_{type}] + attr: [*sve-unstable] + doc: SM4 encryption and decryption + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4e] + compose: + - LLVMLink: { name: "sm4e" } + + - name: svsm4ekey[_{type}] + attr: [*sve-unstable] + doc: SM4 key updates + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4ekey] + compose: + - LLVMLink: { name: "sm4ekey" } diff --git a/crates/stdarch-gen-arm/src/context.rs b/crates/stdarch-gen-arm/src/context.rs index 9b8eb8e8b9..85342a1804 100644 --- a/crates/stdarch-gen-arm/src/context.rs +++ b/crates/stdarch-gen-arm/src/context.rs @@ -43,6 +43,10 @@ pub struct GlobalContext { /// Should all LLVM wrappers convert their arguments to a signed type #[serde(default)] pub auto_llvm_sign_conversion: bool, + + /// Should SVE load/store tests be generated? 
+ #[serde(default)] + pub generate_load_store_tests: bool, } /// Context of an intrinsic group @@ -218,7 +222,7 @@ impl LocalContext { } => Ok(Expression::MacroCall( "static_assert_range".to_string(), format!( - "{variable}, {min}, {max}", + "{variable}, {min}..={max}", min = range.start(), max = range.end() ), @@ -246,7 +250,7 @@ impl LocalContext { |bitsize| Ok(higher_limit / bitsize - 1))?; Ok(Expression::MacroCall( "static_assert_range".to_string(), - format!("{variable}, 0, {max}"), + format!("{variable}, 0..={max}"), )) } else { Err(format!( diff --git a/crates/stdarch-gen-arm/src/expression.rs b/crates/stdarch-gen-arm/src/expression.rs index bf48f0dab7..0b6ffef9d8 100644 --- a/crates/stdarch-gen-arm/src/expression.rs +++ b/crates/stdarch-gen-arm/src/expression.rs @@ -143,8 +143,6 @@ pub enum Expression { LLVMLink(LLVMLink), /// Casts the given expression to the specified (unchecked) type CastAs(Box, String), - /// Returns the LLVM `undef` symbol - SvUndef, /// Multiplication Multiply(Box, Box), /// Xor @@ -295,7 +293,7 @@ impl Expression { /// - An unnecessary `unsafe` is a warning, made into an error by the CI's `-D warnings`. /// /// This **panics** if it encounters an expression that shouldn't appear in a safe function at - /// all (such as `SvUndef`). + /// all. pub fn requires_unsafe_wrapper(&self, ctx_fn: &str) -> bool { match self { // The call will need to be unsafe, but the declaration does not. @@ -347,9 +345,6 @@ impl Expression { }, // We only use macros to check const generics (using static assertions). Self::MacroCall(_name, _args) => false, - // Materialising uninitialised values is always unsafe, and we avoid it in safe - // functions. - Self::SvUndef => panic!("Refusing to wrap unsafe SvUndef in safe function '{ctx_fn}'."), // Variants that aren't tokenised. We shouldn't encounter these here. Self::MatchKind(..) 
=> { unimplemented!("The unsafety of {self:?} cannot be determined in '{ctx_fn}'.") @@ -390,9 +385,7 @@ impl FromStr for Expression { static MACRO_RE: LazyLock = LazyLock::new(|| Regex::new(r"^(?P[\w\d_]+)!\((?P.*?)\);?$").unwrap()); - if s == "SvUndef" { - Ok(Expression::SvUndef) - } else if MACRO_RE.is_match(s) { + if MACRO_RE.is_match(s) { let c = MACRO_RE.captures(s).unwrap(); let ex = c["ex"].to_string(); let _: TokenStream = ex @@ -533,7 +526,6 @@ impl ToTokens for Expression { let ty: TokenStream = ty.parse().expect("invalid syntax"); tokens.append_all(quote! { #ex as #ty }) } - Self::SvUndef => tokens.append_all(quote! { simd_reinterpret(()) }), Self::Multiply(lhs, rhs) => tokens.append_all(quote! { #lhs * #rhs }), Self::Xor(lhs, rhs) => tokens.append_all(quote! { #lhs ^ #rhs }), Self::Type(ty) => ty.to_tokens(tokens), diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index ce427d54b3..5d38d45ca6 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -630,7 +630,7 @@ impl LLVMLink { match (scope, kind.base_type()) { (Argument, Some(Sized(Bool, bitsize))) if *bitsize != 8 => { - Ok(convert("into", arg)) + Ok(convert("sve_into", arg)) } (Argument, Some(Sized(UInt, _) | Unsized(UInt))) => { if ctx.global.auto_llvm_sign_conversion { @@ -647,27 +647,26 @@ impl LLVMLink { }) .try_collect()?; - let return_type_conversion = if !ctx.global.auto_llvm_sign_conversion { - None - } else { - self.signature - .as_ref() - .and_then(|sig| sig.return_type.as_ref()) - .and_then(|ty| { - if let Some(Sized(Bool, bitsize)) = ty.base_type() { - (*bitsize != 8).then_some(Bool) - } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { - Some(UInt) - } else { - None - } - }) - }; + let return_type_conversion = self + .signature + .as_ref() + .and_then(|sig| sig.return_type.as_ref()) + .and_then(|ty| { + if let Some(Sized(Bool, bitsize)) = ty.base_type() { + (*bitsize != 
8).then_some(Bool) + } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { + Some(UInt) + } else { + None + } + }); let fn_call = Expression::FnCall(fn_call); match return_type_conversion { - Some(Bool) => Ok(convert("into", fn_call)), - Some(UInt) => Ok(convert("as_unsigned", fn_call)), + Some(Bool) => Ok(convert("sve_into", fn_call)), + Some(UInt) if ctx.global.auto_llvm_sign_conversion => { + Ok(convert("as_unsigned", fn_call)) + } _ => Ok(fn_call), } } @@ -872,8 +871,8 @@ impl fmt::Display for UnsafetyComment { Self::NoProvenance(arg) => write!( f, "Addresses passed in `{arg}` lack provenance, so this is similar to using a \ - `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before \ - using it." + `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane \ + before using it." ), Self::UnpredictableOnFault => write!( f, diff --git a/crates/stdarch-gen-arm/src/main.rs b/crates/stdarch-gen-arm/src/main.rs index e14e278248..b7e2aa416f 100644 --- a/crates/stdarch-gen-arm/src/main.rs +++ b/crates/stdarch-gen-arm/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), String> { vv.into_iter().flatten().collect_vec() })?; - if filepath.ends_with("sve.spec.yml") || filepath.ends_with("sve2.spec.yml") { + if input.ctx.generate_load_store_tests { let loads = intrinsics.iter() .filter_map(|i| { if matches!(i.test, Test::Load(..)) { diff --git a/crates/stdarch-gen-arm/src/typekinds.rs b/crates/stdarch-gen-arm/src/typekinds.rs index bd47ff2bd1..c3aa22294d 100644 --- a/crates/stdarch-gen-arm/src/typekinds.rs +++ b/crates/stdarch-gen-arm/src/typekinds.rs @@ -289,9 +289,9 @@ impl TypeKind { ( BaseType::Sized(Float | Int | UInt, _), BaseType::Sized(Float | Int | UInt, _), - ) => Some(FnCall::new_expression( + ) => Some(FnCall::new_unsafe_expression( // Conversions between float and (u)int, or where the lane size changes. 
- "simd_reinterpret".parse().unwrap(), + "transmute_unchecked".parse().unwrap(), vec![expr.into()], )), _ => None, diff --git a/crates/stdarch-verify/src/lib.rs b/crates/stdarch-verify/src/lib.rs index c81f5f45bc..f7304ab326 100644 --- a/crates/stdarch-verify/src/lib.rs +++ b/crates/stdarch-verify/src/lib.rs @@ -120,6 +120,13 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { ); } + // Newer intrinsics don't have `rustc_legacy_const_generics` - assume they belong at + // the end of the argument list + if required_const.is_empty() && legacy_const_generics.is_empty() { + legacy_const_generics = + (arguments.len()..(arguments.len() + const_arguments.len())).collect(); + } + // The list of required consts, used to verify the arguments, comes from either the // `rustc_args_required_const` or the `rustc_legacy_const_generics` attribute. let required_const = if required_const.is_empty() { @@ -136,14 +143,14 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { arguments.insert(idx, ty); } - // strip leading underscore from fn name when building a test - // _mm_foo -> mm_foo such that the test name is test_mm_foo. - let test_name_string = format!("{name}"); - let mut test_name_id = test_name_string.as_str(); - while test_name_id.starts_with('_') { - test_name_id = &test_name_id[1..]; - } - let has_test = tests.contains(&format!("test_{test_name_id}")); + // Strip leading underscore from fn name when building a test + // `_mm_foo` -> `mm_foo` such that the test name is `test_mm_foo`. + let test_name = name.to_string(); + let test_name = test_name.trim_start_matches('_'); + let has_test = tests.contains(&format!("test_{test_name}")) + // SVE load/store tests start with `test` or `_with_` + || tests.iter().any(|t| t.starts_with(&format!("test_{test_name}")) + || t.ends_with(&format!("_with_{test_name}"))); let doc = find_doc(&f.attrs); @@ -347,6 +354,50 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream { "v4f32" => quote! 
{ &v4f32 }, "v2f64" => quote! { &v2f64 }, + "svbool_t" => quote! { &SVBOOL }, + "svint8_t" => quote! { &SVI8 }, + "svint8x2_t" => quote! { &SVI8X2 }, + "svint8x3_t" => quote! { &SVI8X3 }, + "svint8x4_t" => quote! { &SVI8X4 }, + "svint16_t" => quote! { &SVI16 }, + "svint16x2_t" => quote! { &SVI16X2 }, + "svint16x3_t" => quote! { &SVI16X3 }, + "svint16x4_t" => quote! { &SVI16X4 }, + "svint32_t" => quote! { &SVI32 }, + "svint32x2_t" => quote! { &SVI32X2 }, + "svint32x3_t" => quote! { &SVI32X3 }, + "svint32x4_t" => quote! { &SVI32X4 }, + "svint64_t" => quote! { &SVI64 }, + "svint64x2_t" => quote! { &SVI64X2 }, + "svint64x3_t" => quote! { &SVI64X3 }, + "svint64x4_t" => quote! { &SVI64X4 }, + "svuint8_t" => quote! { &SVU8 }, + "svuint8x2_t" => quote! { &SVU8X2 }, + "svuint8x3_t" => quote! { &SVU8X3 }, + "svuint8x4_t" => quote! { &SVU8X4 }, + "svuint16_t" => quote! { &SVU16 }, + "svuint16x2_t" => quote! { &SVU16X2 }, + "svuint16x3_t" => quote! { &SVU16X3 }, + "svuint16x4_t" => quote! { &SVU16X4 }, + "svuint32_t" => quote! { &SVU32 }, + "svuint32x2_t" => quote! { &SVU32X2 }, + "svuint32x3_t" => quote! { &SVU32X3 }, + "svuint32x4_t" => quote! { &SVU32X4 }, + "svuint64_t" => quote! { &SVU64 }, + "svuint64x2_t" => quote! { &SVU64X2 }, + "svuint64x3_t" => quote! { &SVU64X3 }, + "svuint64x4_t" => quote! { &SVU64X4 }, + "svfloat32_t" => quote! { &SVF32 }, + "svfloat32x2_t" => quote! { &SVF32X2 }, + "svfloat32x3_t" => quote! { &SVF32X3 }, + "svfloat32x4_t" => quote! { &SVF32X4 }, + "svfloat64_t" => quote! { &SVF64 }, + "svfloat64x2_t" => quote! { &SVF64X2 }, + "svfloat64x3_t" => quote! { &SVF64X3 }, + "svfloat64x4_t" => quote! { &SVF64X4 }, + "svprfop" => quote! { &SVPRFOP }, + "svpattern" => quote! { &SVPATTERN }, + // Generic types "T" => quote! { &GENERICT }, "U" => quote! 
{ &GENERICU }, diff --git a/crates/stdarch-verify/tests/arm.rs b/crates/stdarch-verify/tests/arm.rs index 3ef9ce2a38..a37af2222a 100644 --- a/crates/stdarch-verify/tests/arm.rs +++ b/crates/stdarch-verify/tests/arm.rs @@ -16,6 +16,7 @@ struct Function { doc: &'static str, } +static BOOL: Type = Type::PrimBool; static F16: Type = Type::PrimFloat(16); static F32: Type = Type::PrimFloat(32); static F64: Type = Type::PrimFloat(64); @@ -28,6 +29,7 @@ static U32: Type = Type::PrimUnsigned(32); static U64: Type = Type::PrimUnsigned(64); static U8: Type = Type::PrimUnsigned(8); static NEVER: Type = Type::Never; +static VOID: Type = Type::Void; static GENERICT: Type = Type::GenericParam("T"); static GENERICU: Type = Type::GenericParam("U"); @@ -151,19 +153,78 @@ static U8X8X2: Type = Type::U(8, 8, 2); static U8X8X3: Type = Type::U(8, 8, 3); static U8X8X4: Type = Type::U(8, 8, 4); +static SVBOOL: Type = Type::Pred(1); +static SVBOOLX2: Type = Type::Pred(2); +static SVBOOLX3: Type = Type::Pred(3); +static SVBOOLX4: Type = Type::Pred(4); +static SVCOUNT: Type = Type::Pred(1); +static SVF16: Type = Type::SVF(16, 1); +static SVF16X2: Type = Type::SVF(16, 2); +static SVF16X3: Type = Type::SVF(16, 3); +static SVF16X4: Type = Type::SVF(16, 4); +static SVF32: Type = Type::SVF(32, 1); +static SVF32X2: Type = Type::SVF(32, 2); +static SVF32X3: Type = Type::SVF(32, 3); +static SVF32X4: Type = Type::SVF(32, 4); +static SVF64: Type = Type::SVF(64, 1); +static SVF64X2: Type = Type::SVF(64, 2); +static SVF64X3: Type = Type::SVF(64, 3); +static SVF64X4: Type = Type::SVF(64, 4); +static SVI8: Type = Type::SVI(8, 1); +static SVI8X2: Type = Type::SVI(8, 2); +static SVI8X3: Type = Type::SVI(8, 3); +static SVI8X4: Type = Type::SVI(8, 4); +static SVI16: Type = Type::SVI(16, 1); +static SVI16X2: Type = Type::SVI(16, 2); +static SVI16X3: Type = Type::SVI(16, 3); +static SVI16X4: Type = Type::SVI(16, 4); +static SVI32: Type = Type::SVI(32, 1); +static SVI32X2: Type = Type::SVI(32, 2); +static 
SVI32X3: Type = Type::SVI(32, 3); +static SVI32X4: Type = Type::SVI(32, 4); +static SVI64: Type = Type::SVI(64, 1); +static SVI64X2: Type = Type::SVI(64, 2); +static SVI64X3: Type = Type::SVI(64, 3); +static SVI64X4: Type = Type::SVI(64, 4); +static SVU8: Type = Type::SVU(8, 1); +static SVU8X2: Type = Type::SVU(8, 2); +static SVU8X3: Type = Type::SVU(8, 3); +static SVU8X4: Type = Type::SVU(8, 4); +static SVU16: Type = Type::SVU(16, 1); +static SVU16X2: Type = Type::SVU(16, 2); +static SVU16X3: Type = Type::SVU(16, 3); +static SVU16X4: Type = Type::SVU(16, 4); +static SVU32: Type = Type::SVU(32, 1); +static SVU32X2: Type = Type::SVU(32, 2); +static SVU32X3: Type = Type::SVU(32, 3); +static SVU32X4: Type = Type::SVU(32, 4); +static SVU64: Type = Type::SVU(64, 1); +static SVU64X2: Type = Type::SVU(64, 2); +static SVU64X3: Type = Type::SVU(64, 3); +static SVU64X4: Type = Type::SVU(64, 4); +static SVPRFOP: Type = Type::Enum("svprfop"); +static SVPATTERN: Type = Type::Enum("svpattern"); + #[derive(Debug, Copy, Clone, PartialEq)] enum Type { + Void, + PrimBool, PrimFloat(u8), PrimSigned(u8), PrimUnsigned(u8), PrimPoly(u8), MutPtr(&'static Type), ConstPtr(&'static Type), + Enum(&'static str), GenericParam(&'static str), I(u8, u8, u8), U(u8, u8, u8), P(u8, u8, u8), F(u8, u8, u8), + Pred(u8), + SVI(u8, u8), + SVU(u8, u8), + SVF(u8, u8), Never, } @@ -182,231 +243,20 @@ fn verify_all_signatures() { let mut all_valid = true; for rust in FUNCTIONS { + // Most SVE intrinsics just rely on the intrinsics test tool for validation if !rust.has_test { - let skip = [ - "vaddq_s64", - "vaddq_u64", - "vrsqrte_f32", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "udf", - "_clz_u8", - "_clz_u16", - 
"_clz_u32", - "_rbit_u32", - "_rev_u16", - "_rev_u32", - "__breakpoint", - "vpminq_f32", - "vpminq_f64", - "vpmaxq_f32", - "vpmaxq_f64", - "vcombine_s8", - "vcombine_s16", - "vcombine_s32", - "vcombine_s64", - "vcombine_u8", - "vcombine_u16", - "vcombine_u32", - "vcombine_u64", - "vcombine_p64", - "vcombine_f32", - "vcombine_p8", - "vcombine_p16", - "vcombine_f64", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "vqtbl1_s8", - "vqtbl1q_s8", - "vqtbl1_u8", - "vqtbl1q_u8", - "vqtbl1_p8", - "vqtbl1q_p8", - "vqtbx1_s8", - "vqtbx1q_s8", - "vqtbx1_u8", - "vqtbx1q_u8", - "vqtbx1_p8", - "vqtbx1q_p8", - "vqtbl2_s8", - "vqtbl2q_s8", - "vqtbl2_u8", - "vqtbl2q_u8", - "vqtbl2_p8", - "vqtbl2q_p8", - "vqtbx2_s8", - "vqtbx2q_s8", - "vqtbx2_u8", - "vqtbx2q_u8", - "vqtbx2_p8", - "vqtbx2q_p8", - "vqtbl3_s8", - "vqtbl3q_s8", - "vqtbl3_u8", - "vqtbl3q_u8", - "vqtbl3_p8", - "vqtbl3q_p8", - "vqtbx3_s8", - "vqtbx3q_s8", - "vqtbx3_u8", - "vqtbx3q_u8", - "vqtbx3_p8", - "vqtbx3q_p8", - "vqtbl4_s8", - "vqtbl4q_s8", - "vqtbl4_u8", - "vqtbl4q_u8", - "vqtbl4_p8", - "vqtbl4q_p8", - "vqtbx4_s8", - "vqtbx4q_s8", - "vqtbx4_u8", - "vqtbx4q_u8", - "vqtbx4_p8", - "vqtbx4q_p8", - "brk", - "_rev_u64", - "_clz_u64", - "_rbit_u64", - "_cls_u32", - "_cls_u64", - "_prefetch", - "vsli_n_s8", - "vsliq_n_s8", - "vsli_n_s16", - "vsliq_n_s16", - "vsli_n_s32", - "vsliq_n_s32", - "vsli_n_s64", - "vsliq_n_s64", - "vsli_n_u8", - "vsliq_n_u8", - "vsli_n_u16", - "vsliq_n_u16", - "vsli_n_u32", - "vsliq_n_u32", - "vsli_n_u64", - "vsliq_n_u64", - "vsli_n_p8", - "vsliq_n_p8", - "vsli_n_p16", - "vsliq_n_p16", - "vsli_n_p64", - "vsliq_n_p64", - "vsri_n_s8", - "vsriq_n_s8", - "vsri_n_s16", - "vsriq_n_s16", - "vsri_n_s32", - "vsriq_n_s32", - 
"vsri_n_s64", - "vsriq_n_s64", - "vsri_n_u8", - "vsriq_n_u8", - "vsri_n_u16", - "vsriq_n_u16", - "vsri_n_u32", - "vsriq_n_u32", - "vsri_n_u64", - "vsriq_n_u64", - "vsri_n_p8", - "vsriq_n_p8", - "vsri_n_p16", - "vsriq_n_p16", - "vsri_n_p64", - "vsriq_n_p64", - "__smulbb", - "__smultb", - "__smulbt", - "__smultt", - "__smulwb", - "__smulwt", - "__qadd", - "__qsub", - "__qdbl", - "__smlabb", - "__smlabt", - "__smlatb", - "__smlatt", - "__smlawb", - "__smlawt", - "__qadd8", - "__qsub8", - "__qsub16", - "__qadd16", - "__qasx", - "__qsax", - "__sadd16", - "__sadd8", - "__smlad", - "__smlsd", - "__sasx", - "__sel", - "__shadd8", - "__shadd16", - "__shsub8", - "__usub8", - "__ssub8", - "__shsub16", - "__smuad", - "__smuadx", - "__smusd", - "__smusdx", - "__usad8", - "__usada8", - "__ldrex", - "__strex", - "__ldrexb", - "__strexb", - "__ldrexh", - "__strexh", - "__clrex", - "__dbg", - ]; + if !SKIP_RUNTIME_TESTS.contains(&rust.name) + // Most run-time tests are handled by the intrinsic-test tool, except for + // load/stores (which have generated tests) + && (!rust.name.starts_with("sv") || rust.name.starts_with("svld") + || rust.name.starts_with("svst")) + // The load/store test generator can't handle these cases yet + && (!rust.name.contains("_u32base_") || rust.name.contains("index") || rust.name.contains("offset")) + && !(rust.name.starts_with("svldff1") && rust.name.contains("gather")) + { + println!("missing run-time test for `{}`", rust.name); + all_valid = false; + } } // Skip some intrinsics that aren't NEON and are located in different @@ -479,12 +329,21 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { let mut nconst = 0; let iter = rust.arguments.iter().zip(&arm.arguments).enumerate(); for (i, (rust_ty, (arm, arm_const))) in iter { - if *rust_ty != arm { - bail!("mismatched arguments: {rust_ty:?} != {arm:?}") + match (*rust_ty, arm) { + // SVE uses generic type parameters to handle void pointers + (Type::ConstPtr(Type::GenericParam("T")), 
Type::ConstPtr(Type::Void)) => (), + // SVE const generics use i32 over u64 for usability reasons + (Type::PrimSigned(32), Type::PrimUnsigned(64)) if rust.required_const.contains(&i) => { + () + } + // svset doesn't have its const argument last as we assumed when building the Function + _ if rust.name.starts_with("svset") => (), + (x, y) if x == y => (), + _ => bail!("mismatched arguments: {rust_ty:?} != {arm:?}"), } if *arm_const { nconst += 1; - if !rust.required_const.contains(&i) { + if !rust.required_const.contains(&i) && !rust.name.starts_with("svset") { bail!("argument const mismatch"); } } @@ -493,7 +352,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { bail!("wrong number of const arguments"); } - if rust.instrs.is_empty() { + if rust.instrs.is_empty() && arm.instruction != "" { bail!( "instruction not listed for `{}`, but arm lists {:?}", rust.name, @@ -532,7 +391,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { Ok(()) } -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] struct Intrinsic { name: String, ret: Option, @@ -547,7 +406,7 @@ struct JsonIntrinsic { arguments: Vec, return_type: ReturnType, #[serde(default)] - instructions: Vec>, + instructions: Option>>, } #[derive(Deserialize, Debug)] @@ -566,6 +425,8 @@ fn parse_intrinsics(intrinsics: Vec) -> HashMap Intrinsic { let name = intr.name; + // Remove '[' and ']' so that intrinsics of the form `svwhilerw[_s16]` becomes `svwhilerw_s16`. 
+ let name = name.replace('[', "").replace(']', ""); let ret = if intr.return_type.value == "void" { None } else { @@ -574,18 +435,24 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { // This ignores multiple instructions and different optional sequences for now to mimic // the old HTML scraping behaviour - let instruction = intr.instructions.swap_remove(0).swap_remove(0); + let instruction = intr + .instructions + .map_or(String::new(), |mut i| i.swap_remove(0).swap_remove(0)); let arguments = intr .arguments .iter() .map(|s| { - let (ty, konst) = match s.strip_prefix("const") { - Some(stripped) => (stripped.trim_start(), true), - None => (s.as_str(), false), + let ty = if let Some(i) = s.find('*') { + &s[..i + 1] + } else { + s.rsplit_once(' ').unwrap().0.trim_start_matches("const ") }; - let ty = ty.rsplit_once(' ').unwrap().0; - (parse_ty(ty), konst) + let ty = parse_ty(ty); + let konst = s.contains("const") && !matches!(ty, Type::ConstPtr(_)) + || s.starts_with("enum") + || s.rsplit_once(" ").unwrap().1.starts_with("imm"); + (ty, konst) }) .collect::>(); @@ -598,18 +465,27 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { } fn parse_ty(s: &str) -> Type { - let suffix = " const *"; - if let Some(base) = s.strip_suffix(suffix) { - Type::ConstPtr(parse_ty_base(base)) - } else if let Some(base) = s.strip_suffix(" *") { - Type::MutPtr(parse_ty_base(base)) + if let Some(ty) = s.strip_suffix("*") { + let ty = ty.trim(); + if let Some(ty) = ty.strip_prefix("const") { + // SVE intrinsics are west-const (`const int8_t *`) + Type::ConstPtr(parse_ty_base(ty)) + } else if let Some(ty) = ty.strip_suffix("const") { + // Neon intrinsics are east-const (`int8_t const *`) + Type::ConstPtr(parse_ty_base(ty)) + } else { + Type::MutPtr(parse_ty_base(ty)) + } } else { *parse_ty_base(s) } } fn parse_ty_base(s: &str) -> &'static Type { + let s = s.trim(); match s { + "bool" => &BOOL, + "void" => &VOID, "float16_t" => &F16, "float16x4_t" => &F16X4, 
"float16x4x2_t" => &F16X4X2, @@ -739,7 +615,4526 @@ fn parse_ty_base(s: &str) -> &'static Type { "uint8x8x2_t" => &U8X8X2, "uint8x8x3_t" => &U8X8X3, "uint8x8x4_t" => &U8X8X4, + "svbool_t" => &SVBOOL, + "svboolx2_t" => &SVBOOLX2, + "svboolx3_t" => &SVBOOLX3, + "svboolx4_t" => &SVBOOLX4, + "svcount_t" => &SVCOUNT, + "svfloat16_t" => &SVF16, + "svfloat16x2_t" => &SVF16X2, + "svfloat16x3_t" => &SVF16X3, + "svfloat16x4_t" => &SVF16X4, + "svfloat32_t" => &SVF32, + "svfloat32x2_t" => &SVF32X2, + "svfloat32x3_t" => &SVF32X3, + "svfloat32x4_t" => &SVF32X4, + "svfloat64_t" => &SVF64, + "svfloat64x2_t" => &SVF64X2, + "svfloat64x3_t" => &SVF64X3, + "svfloat64x4_t" => &SVF64X4, + "svint8_t" => &SVI8, + "svint8x2_t" => &SVI8X2, + "svint8x3_t" => &SVI8X3, + "svint8x4_t" => &SVI8X4, + "svint16_t" => &SVI16, + "svint16x2_t" => &SVI16X2, + "svint16x3_t" => &SVI16X3, + "svint16x4_t" => &SVI16X4, + "svint32_t" => &SVI32, + "svint32x2_t" => &SVI32X2, + "svint32x3_t" => &SVI32X3, + "svint32x4_t" => &SVI32X4, + "svint64_t" => &SVI64, + "svint64x2_t" => &SVI64X2, + "svint64x3_t" => &SVI64X3, + "svint64x4_t" => &SVI64X4, + "svuint8_t" => &SVU8, + "svuint8x2_t" => &SVU8X2, + "svuint8x3_t" => &SVU8X3, + "svuint8x4_t" => &SVU8X4, + "svuint16_t" => &SVU16, + "svuint16x2_t" => &SVU16X2, + "svuint16x3_t" => &SVU16X3, + "svuint16x4_t" => &SVU16X4, + "svuint32_t" => &SVU32, + "svuint32x2_t" => &SVU32X2, + "svuint32x3_t" => &SVU32X3, + "svuint32x4_t" => &SVU32X4, + "svuint64_t" => &SVU64, + "svuint64x2_t" => &SVU64X2, + "svuint64x3_t" => &SVU64X3, + "svuint64x4_t" => &SVU64X4, + "enum svprfop" => &SVPRFOP, + "enum svpattern" => &SVPATTERN, _ => panic!("failed to parse json type {s:?}"), } } + +// FIXME(arm-maintainers): With the advent of the `intrinsic-test` tool, new tests of this kind +// are no longer being added and just adding to this list indefinitely isn't the best solution for +// dealing with that. 
+static SKIP_RUNTIME_TESTS: &'static [&'static str] = &[ + "vaddq_s64", + "vaddq_u64", + "vrsqrte_f32", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "udf", + "_clz_u8", + "_clz_u16", + "_clz_u32", + "_rbit_u32", + "_rev_u16", + "_rev_u32", + "__breakpoint", + "vpminq_f32", + "vpminq_f64", + "vpmaxq_f32", + "vpmaxq_f64", + "vcombine_s8", + "vcombine_s16", + "vcombine_s32", + "vcombine_s64", + "vcombine_u8", + "vcombine_u16", + "vcombine_u32", + "vcombine_u64", + "vcombine_p64", + "vcombine_f32", + "vcombine_p8", + "vcombine_p16", + "vcombine_f64", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "vqtbl1_s8", + "vqtbl1q_s8", + "vqtbl1_u8", + "vqtbl1q_u8", + "vqtbl1_p8", + "vqtbl1q_p8", + "vqtbx1_s8", + "vqtbx1q_s8", + "vqtbx1_u8", + "vqtbx1q_u8", + "vqtbx1_p8", + "vqtbx1q_p8", + "vqtbl2_s8", + "vqtbl2q_s8", + "vqtbl2_u8", + "vqtbl2q_u8", + "vqtbl2_p8", + "vqtbl2q_p8", + "vqtbx2_s8", + "vqtbx2q_s8", + "vqtbx2_u8", + "vqtbx2q_u8", + "vqtbx2_p8", + "vqtbx2q_p8", + "vqtbl3_s8", + "vqtbl3q_s8", + "vqtbl3_u8", + "vqtbl3q_u8", + "vqtbl3_p8", + "vqtbl3q_p8", + "vqtbx3_s8", + "vqtbx3q_s8", + "vqtbx3_u8", + "vqtbx3q_u8", + "vqtbx3_p8", + "vqtbx3q_p8", + "vqtbl4_s8", + "vqtbl4q_s8", + "vqtbl4_u8", + "vqtbl4q_u8", + "vqtbl4_p8", + "vqtbl4q_p8", + "vqtbx4_s8", + "vqtbx4q_s8", + "vqtbx4_u8", + "vqtbx4q_u8", + "vqtbx4_p8", + "vqtbx4q_p8", + "brk", + "_rev_u64", + "_clz_u64", + "_rbit_u64", + "_cls_u32", + 
"_cls_u64", + "_prefetch", + "vsli_n_s8", + "vsliq_n_s8", + "vsli_n_s16", + "vsliq_n_s16", + "vsli_n_s32", + "vsliq_n_s32", + "vsli_n_s64", + "vsliq_n_s64", + "vsli_n_u8", + "vsliq_n_u8", + "vsli_n_u16", + "vsliq_n_u16", + "vsli_n_u32", + "vsliq_n_u32", + "vsli_n_u64", + "vsliq_n_u64", + "vsli_n_p8", + "vsliq_n_p8", + "vsli_n_p16", + "vsliq_n_p16", + "vsli_n_p64", + "vsliq_n_p64", + "vsri_n_s8", + "vsriq_n_s8", + "vsri_n_s16", + "vsriq_n_s16", + "vsri_n_s32", + "vsriq_n_s32", + "vsri_n_s64", + "vsriq_n_s64", + "vsri_n_u8", + "vsriq_n_u8", + "vsri_n_u16", + "vsriq_n_u16", + "vsri_n_u32", + "vsriq_n_u32", + "vsri_n_u64", + "vsriq_n_u64", + "vsri_n_p8", + "vsriq_n_p8", + "vsri_n_p16", + "vsriq_n_p16", + "vsri_n_p64", + "vsriq_n_p64", + "__smulbb", + "__smultb", + "__smulbt", + "__smultt", + "__smulwb", + "__smulwt", + "__qadd", + "__qsub", + "__qdbl", + "__smlabb", + "__smlabt", + "__smlatb", + "__smlatt", + "__smlawb", + "__smlawt", + "__qadd8", + "__qsub8", + "__qsub16", + "__qadd16", + "__qasx", + "__qsax", + "__sadd16", + "__sadd8", + "__smlad", + "__smlsd", + "__sasx", + "__sel", + "__shadd8", + "__shadd16", + "__shsub8", + "__usub8", + "__ssub8", + "__shsub16", + "__smuad", + "__smuadx", + "__smusd", + "__smusdx", + "__usad8", + "__usada8", + "__ldrex", + "__strex", + "__ldrexb", + "__strexb", + "__ldrexh", + "__strexh", + "__clrex", + "__dbg", + "__crc32cd", + "__crc32d", + "__jcvt", + "vabal_high_s8", + "vabal_high_s16", + "vabal_high_s32", + "vabal_high_u8", + "vabal_high_u16", + "vabal_high_u32", + "vabd_f64", + "vabdq_f64", + "vabdd_f64", + "vabds_f32", + "vabdh_f16", + "vabdl_high_s16", + "vabdl_high_s32", + "vabdl_high_s8", + "vabdl_high_u8", + "vabdl_high_u16", + "vabdl_high_u32", + "vabs_f64", + "vabsq_f64", + "vabs_s64", + "vabsq_s64", + "vabsd_s64", + "vaddlv_s16", + "vaddlvq_s16", + "vaddlvq_s32", + "vaddlv_s32", + "vaddlv_s8", + "vaddlvq_s8", + "vaddlv_u16", + "vaddlvq_u16", + "vaddlvq_u32", + "vaddlv_u32", + "vaddlv_u8", + "vaddlvq_u8", + 
"vaddv_f32", + "vaddvq_f32", + "vaddvq_f64", + "vaddv_s32", + "vaddv_s8", + "vaddvq_s8", + "vaddv_s16", + "vaddvq_s16", + "vaddvq_s32", + "vaddv_u32", + "vaddv_u8", + "vaddvq_u8", + "vaddv_u16", + "vaddvq_u16", + "vaddvq_u32", + "vaddvq_s64", + "vaddvq_u64", + "vamax_f16", + "vamaxq_f16", + "vamax_f32", + "vamaxq_f32", + "vamaxq_f64", + "vamin_f16", + "vaminq_f16", + "vamin_f32", + "vaminq_f32", + "vaminq_f64", + "vbcaxq_s8", + "vbcaxq_s16", + "vbcaxq_s32", + "vbcaxq_s64", + "vbcaxq_u8", + "vbcaxq_u16", + "vbcaxq_u32", + "vbcaxq_u64", + "vcadd_rot270_f16", + "vcaddq_rot270_f16", + "vcadd_rot270_f32", + "vcaddq_rot270_f32", + "vcaddq_rot270_f64", + "vcadd_rot90_f16", + "vcaddq_rot90_f16", + "vcadd_rot90_f32", + "vcaddq_rot90_f32", + "vcaddq_rot90_f64", + "vcage_f64", + "vcageq_f64", + "vcaged_f64", + "vcages_f32", + "vcageh_f16", + "vcagt_f64", + "vcagtq_f64", + "vcagtd_f64", + "vcagts_f32", + "vcagth_f16", + "vcale_f64", + "vcaleq_f64", + "vcaled_f64", + "vcales_f32", + "vcaleh_f16", + "vcalt_f64", + "vcaltq_f64", + "vcaltd_f64", + "vcalts_f32", + "vcalth_f16", + "vceq_f64", + "vceqq_f64", + "vceq_s64", + "vceqq_s64", + "vceq_u64", + "vceqq_u64", + "vceq_p64", + "vceqq_p64", + "vceqd_f64", + "vceqs_f32", + "vceqd_s64", + "vceqd_u64", + "vceqh_f16", + "vceqz_f16", + "vceqzq_f16", + "vceqz_f32", + "vceqzq_f32", + "vceqz_f64", + "vceqzq_f64", + "vceqz_s8", + "vceqzq_s8", + "vceqz_s16", + "vceqzq_s16", + "vceqz_s32", + "vceqzq_s32", + "vceqz_s64", + "vceqzq_s64", + "vceqz_p8", + "vceqzq_p8", + "vceqz_p64", + "vceqzq_p64", + "vceqz_u8", + "vceqzq_u8", + "vceqz_u16", + "vceqzq_u16", + "vceqz_u32", + "vceqzq_u32", + "vceqz_u64", + "vceqzq_u64", + "vceqzd_s64", + "vceqzd_u64", + "vceqzh_f16", + "vceqzs_f32", + "vceqzd_f64", + "vcge_f64", + "vcgeq_f64", + "vcge_s64", + "vcgeq_s64", + "vcge_u64", + "vcgeq_u64", + "vcged_f64", + "vcges_f32", + "vcged_s64", + "vcged_u64", + "vcgeh_f16", + "vcgez_f32", + "vcgezq_f32", + "vcgez_f64", + "vcgezq_f64", + "vcgez_s8", + "vcgezq_s8", 
+ "vcgez_s16", + "vcgezq_s16", + "vcgez_s32", + "vcgezq_s32", + "vcgez_s64", + "vcgezq_s64", + "vcgezd_f64", + "vcgezs_f32", + "vcgezd_s64", + "vcgezh_f16", + "vcgt_f64", + "vcgtq_f64", + "vcgt_s64", + "vcgtq_s64", + "vcgt_u64", + "vcgtq_u64", + "vcgtd_f64", + "vcgts_f32", + "vcgtd_s64", + "vcgtd_u64", + "vcgth_f16", + "vcgtz_f32", + "vcgtzq_f32", + "vcgtz_f64", + "vcgtzq_f64", + "vcgtz_s8", + "vcgtzq_s8", + "vcgtz_s16", + "vcgtzq_s16", + "vcgtz_s32", + "vcgtzq_s32", + "vcgtz_s64", + "vcgtzq_s64", + "vcgtzd_f64", + "vcgtzs_f32", + "vcgtzd_s64", + "vcgtzh_f16", + "vcle_f64", + "vcleq_f64", + "vcle_s64", + "vcleq_s64", + "vcle_u64", + "vcleq_u64", + "vcled_f64", + "vcles_f32", + "vcled_u64", + "vcled_s64", + "vcleh_f16", + "vclez_f32", + "vclezq_f32", + "vclez_f64", + "vclezq_f64", + "vclez_s8", + "vclezq_s8", + "vclez_s16", + "vclezq_s16", + "vclez_s32", + "vclezq_s32", + "vclez_s64", + "vclezq_s64", + "vclezd_f64", + "vclezs_f32", + "vclezd_s64", + "vclezh_f16", + "vclt_f64", + "vcltq_f64", + "vclt_s64", + "vcltq_s64", + "vclt_u64", + "vcltq_u64", + "vcltd_u64", + "vcltd_s64", + "vclth_f16", + "vclts_f32", + "vcltd_f64", + "vcltz_f32", + "vcltzq_f32", + "vcltz_f64", + "vcltzq_f64", + "vcltz_s8", + "vcltzq_s8", + "vcltz_s16", + "vcltzq_s16", + "vcltz_s32", + "vcltzq_s32", + "vcltz_s64", + "vcltzq_s64", + "vcltzd_f64", + "vcltzs_f32", + "vcltzd_s64", + "vcltzh_f16", + "vcmla_f16", + "vcmlaq_f16", + "vcmla_f32", + "vcmlaq_f32", + "vcmlaq_f64", + "vcmla_lane_f16", + "vcmlaq_lane_f16", + "vcmla_lane_f32", + "vcmlaq_lane_f32", + "vcmla_laneq_f16", + "vcmlaq_laneq_f16", + "vcmla_laneq_f32", + "vcmlaq_laneq_f32", + "vcmla_rot180_f16", + "vcmlaq_rot180_f16", + "vcmla_rot180_f32", + "vcmlaq_rot180_f32", + "vcmlaq_rot180_f64", + "vcmla_rot180_lane_f16", + "vcmlaq_rot180_lane_f16", + "vcmla_rot180_lane_f32", + "vcmlaq_rot180_lane_f32", + "vcmla_rot180_laneq_f16", + "vcmlaq_rot180_laneq_f16", + "vcmla_rot180_laneq_f32", + "vcmlaq_rot180_laneq_f32", + "vcmla_rot270_f16", + 
"vcmlaq_rot270_f16", + "vcmla_rot270_f32", + "vcmlaq_rot270_f32", + "vcmlaq_rot270_f64", + "vcmla_rot270_lane_f16", + "vcmlaq_rot270_lane_f16", + "vcmla_rot270_lane_f32", + "vcmlaq_rot270_lane_f32", + "vcmla_rot270_laneq_f16", + "vcmlaq_rot270_laneq_f16", + "vcmla_rot270_laneq_f32", + "vcmlaq_rot270_laneq_f32", + "vcmla_rot90_f16", + "vcmlaq_rot90_f16", + "vcmla_rot90_f32", + "vcmlaq_rot90_f32", + "vcmlaq_rot90_f64", + "vcmla_rot90_lane_f16", + "vcmlaq_rot90_lane_f16", + "vcmla_rot90_lane_f32", + "vcmlaq_rot90_lane_f32", + "vcmla_rot90_laneq_f16", + "vcmlaq_rot90_laneq_f16", + "vcmla_rot90_laneq_f32", + "vcmlaq_rot90_laneq_f32", + "vcopy_lane_f32", + "vcopy_lane_s8", + "vcopy_lane_s16", + "vcopy_lane_s32", + "vcopy_lane_u8", + "vcopy_lane_u16", + "vcopy_lane_u32", + "vcopy_lane_p8", + "vcopy_lane_p16", + "vcopy_laneq_f32", + "vcopy_laneq_s8", + "vcopy_laneq_s16", + "vcopy_laneq_s32", + "vcopy_laneq_u8", + "vcopy_laneq_u16", + "vcopy_laneq_u32", + "vcopy_laneq_p8", + "vcopy_laneq_p16", + "vcopyq_lane_f32", + "vcopyq_lane_f64", + "vcopyq_lane_s64", + "vcopyq_lane_u64", + "vcopyq_lane_p64", + "vcopyq_lane_s8", + "vcopyq_lane_s16", + "vcopyq_lane_s32", + "vcopyq_lane_u8", + "vcopyq_lane_u16", + "vcopyq_lane_u32", + "vcopyq_lane_p8", + "vcopyq_lane_p16", + "vcopyq_laneq_f32", + "vcopyq_laneq_f64", + "vcopyq_laneq_s8", + "vcopyq_laneq_s16", + "vcopyq_laneq_s32", + "vcopyq_laneq_s64", + "vcopyq_laneq_u8", + "vcopyq_laneq_u16", + "vcopyq_laneq_u32", + "vcopyq_laneq_u64", + "vcopyq_laneq_p8", + "vcopyq_laneq_p16", + "vcopyq_laneq_p64", + "vcreate_f64", + "vcvt_f32_f64", + "vcvt_f64_f32", + "vcvt_f64_s64", + "vcvtq_f64_s64", + "vcvt_f64_u64", + "vcvtq_f64_u64", + "vcvt_high_f16_f32", + "vcvt_high_f32_f16", + "vcvt_high_f32_f64", + "vcvt_high_f64_f32", + "vcvt_n_f64_s64", + "vcvtq_n_f64_s64", + "vcvt_n_f64_u64", + "vcvtq_n_f64_u64", + "vcvt_n_s64_f64", + "vcvtq_n_s64_f64", + "vcvt_n_u64_f64", + "vcvtq_n_u64_f64", + "vcvt_s64_f64", + "vcvtq_s64_f64", + "vcvt_u64_f64", + 
"vcvtq_u64_f64", + "vcvta_s16_f16", + "vcvtaq_s16_f16", + "vcvta_s32_f32", + "vcvtaq_s32_f32", + "vcvta_s64_f64", + "vcvtaq_s64_f64", + "vcvta_u16_f16", + "vcvtaq_u16_f16", + "vcvta_u32_f32", + "vcvtaq_u32_f32", + "vcvta_u64_f64", + "vcvtaq_u64_f64", + "vcvtah_s16_f16", + "vcvtah_s32_f16", + "vcvtah_s64_f16", + "vcvtah_u16_f16", + "vcvtah_u32_f16", + "vcvtah_u64_f16", + "vcvtas_s32_f32", + "vcvtad_s64_f64", + "vcvtas_u32_f32", + "vcvtad_u64_f64", + "vcvtd_f64_s64", + "vcvts_f32_s32", + "vcvth_f16_s16", + "vcvth_f16_s32", + "vcvth_f16_s64", + "vcvth_f16_u16", + "vcvth_f16_u32", + "vcvth_f16_u64", + "vcvth_n_f16_s16", + "vcvth_n_f16_s32", + "vcvth_n_f16_s64", + "vcvth_n_f16_u16", + "vcvth_n_f16_u32", + "vcvth_n_f16_u64", + "vcvth_n_s16_f16", + "vcvth_n_s32_f16", + "vcvth_n_s64_f16", + "vcvth_n_u16_f16", + "vcvth_n_u32_f16", + "vcvth_n_u64_f16", + "vcvth_s16_f16", + "vcvth_s32_f16", + "vcvth_s64_f16", + "vcvth_u16_f16", + "vcvth_u32_f16", + "vcvth_u64_f16", + "vcvtm_s16_f16", + "vcvtmq_s16_f16", + "vcvtm_s32_f32", + "vcvtmq_s32_f32", + "vcvtm_s64_f64", + "vcvtmq_s64_f64", + "vcvtm_u16_f16", + "vcvtmq_u16_f16", + "vcvtm_u32_f32", + "vcvtmq_u32_f32", + "vcvtm_u64_f64", + "vcvtmq_u64_f64", + "vcvtmh_s16_f16", + "vcvtmh_s32_f16", + "vcvtmh_s64_f16", + "vcvtmh_u16_f16", + "vcvtmh_u32_f16", + "vcvtmh_u64_f16", + "vcvtms_s32_f32", + "vcvtmd_s64_f64", + "vcvtms_u32_f32", + "vcvtmd_u64_f64", + "vcvtn_s16_f16", + "vcvtnq_s16_f16", + "vcvtn_s32_f32", + "vcvtnq_s32_f32", + "vcvtn_s64_f64", + "vcvtnq_s64_f64", + "vcvtn_u16_f16", + "vcvtnq_u16_f16", + "vcvtn_u32_f32", + "vcvtnq_u32_f32", + "vcvtn_u64_f64", + "vcvtnq_u64_f64", + "vcvtnh_s16_f16", + "vcvtnh_s32_f16", + "vcvtnh_s64_f16", + "vcvtnh_u16_f16", + "vcvtnh_u32_f16", + "vcvtnh_u64_f16", + "vcvtns_s32_f32", + "vcvtnd_s64_f64", + "vcvtns_u32_f32", + "vcvtnd_u64_f64", + "vcvtp_s16_f16", + "vcvtpq_s16_f16", + "vcvtp_s32_f32", + "vcvtpq_s32_f32", + "vcvtp_s64_f64", + "vcvtpq_s64_f64", + "vcvtp_u16_f16", + "vcvtpq_u16_f16", + 
"vcvtp_u32_f32", + "vcvtpq_u32_f32", + "vcvtp_u64_f64", + "vcvtpq_u64_f64", + "vcvtph_s16_f16", + "vcvtph_s32_f16", + "vcvtph_s64_f16", + "vcvtph_u16_f16", + "vcvtph_u32_f16", + "vcvtph_u64_f16", + "vcvtps_s32_f32", + "vcvtpd_s64_f64", + "vcvtps_u32_f32", + "vcvtpd_u64_f64", + "vcvts_f32_u32", + "vcvtd_f64_u64", + "vcvts_n_f32_s32", + "vcvtd_n_f64_s64", + "vcvts_n_f32_u32", + "vcvtd_n_f64_u64", + "vcvts_n_s32_f32", + "vcvtd_n_s64_f64", + "vcvts_n_u32_f32", + "vcvtd_n_u64_f64", + "vcvts_s32_f32", + "vcvtd_s64_f64", + "vcvts_u32_f32", + "vcvtd_u64_f64", + "vcvtx_f32_f64", + "vcvtx_high_f32_f64", + "vcvtxd_f32_f64", + "vdiv_f16", + "vdivq_f16", + "vdiv_f32", + "vdivq_f32", + "vdiv_f64", + "vdivq_f64", + "vdivh_f16", + "vdup_lane_f64", + "vdup_lane_p64", + "vdup_laneq_f64", + "vdup_laneq_p64", + "vdupb_lane_s8", + "vduph_laneq_s16", + "vdupb_lane_u8", + "vduph_laneq_u16", + "vdupb_lane_p8", + "vduph_laneq_p16", + "vdupb_laneq_s8", + "vdupb_laneq_u8", + "vdupb_laneq_p8", + "vdupd_lane_f64", + "vdupd_lane_s64", + "vdupd_lane_u64", + "vduph_lane_f16", + "vduph_laneq_f16", + "vdupq_lane_f64", + "vdupq_lane_p64", + "vdupq_laneq_f64", + "vdupq_laneq_p64", + "vdups_lane_f32", + "vdupd_laneq_f64", + "vdups_lane_s32", + "vdupd_laneq_s64", + "vdups_lane_u32", + "vdupd_laneq_u64", + "vdups_laneq_f32", + "vduph_lane_s16", + "vdups_laneq_s32", + "vduph_lane_u16", + "vdups_laneq_u32", + "vduph_lane_p16", + "veor3q_s8", + "veor3q_s16", + "veor3q_s32", + "veor3q_s64", + "veor3q_u8", + "veor3q_u16", + "veor3q_u32", + "veor3q_u64", + "vextq_f64", + "vextq_p64", + "vfma_f64", + "vfma_lane_f16", + "vfma_laneq_f16", + "vfmaq_lane_f16", + "vfmaq_laneq_f16", + "vfma_lane_f32", + "vfma_laneq_f32", + "vfmaq_lane_f32", + "vfmaq_laneq_f32", + "vfmaq_laneq_f64", + "vfma_lane_f64", + "vfma_laneq_f64", + "vfma_n_f16", + "vfmaq_n_f16", + "vfma_n_f64", + "vfmad_lane_f64", + "vfmah_f16", + "vfmah_lane_f16", + "vfmah_laneq_f16", + "vfmaq_f64", + "vfmaq_lane_f64", + "vfmaq_n_f64", + "vfmas_lane_f32", + 
"vfmas_laneq_f32", + "vfmad_laneq_f64", + "vfmlal_high_f16", + "vfmlalq_high_f16", + "vfmlal_lane_high_f16", + "vfmlal_laneq_high_f16", + "vfmlalq_lane_high_f16", + "vfmlalq_laneq_high_f16", + "vfmlal_lane_low_f16", + "vfmlal_laneq_low_f16", + "vfmlalq_lane_low_f16", + "vfmlalq_laneq_low_f16", + "vfmlal_low_f16", + "vfmlalq_low_f16", + "vfmlsl_high_f16", + "vfmlslq_high_f16", + "vfmlsl_lane_high_f16", + "vfmlsl_laneq_high_f16", + "vfmlslq_lane_high_f16", + "vfmlslq_laneq_high_f16", + "vfmlsl_lane_low_f16", + "vfmlsl_laneq_low_f16", + "vfmlslq_lane_low_f16", + "vfmlslq_laneq_low_f16", + "vfmlsl_low_f16", + "vfmlslq_low_f16", + "vfms_f64", + "vfms_lane_f16", + "vfms_laneq_f16", + "vfmsq_lane_f16", + "vfmsq_laneq_f16", + "vfms_lane_f32", + "vfms_laneq_f32", + "vfmsq_lane_f32", + "vfmsq_laneq_f32", + "vfmsq_laneq_f64", + "vfms_lane_f64", + "vfms_laneq_f64", + "vfms_n_f16", + "vfmsq_n_f16", + "vfms_n_f64", + "vfmsh_f16", + "vfmsh_lane_f16", + "vfmsh_laneq_f16", + "vfmsq_f64", + "vfmsq_lane_f64", + "vfmsq_n_f64", + "vfmss_lane_f32", + "vfmss_laneq_f32", + "vfmsd_lane_f64", + "vfmsd_laneq_f64", + "vld1_f16", + "vld1q_f16", + "vld1_f64_x2", + "vld1_f64_x3", + "vld1_f64_x4", + "vld1q_f64_x2", + "vld1q_f64_x3", + "vld1q_f64_x4", + "vld2_dup_f64", + "vld2q_dup_f64", + "vld2q_dup_s64", + "vld2_f64", + "vld2_lane_f64", + "vld2_lane_s64", + "vld2_lane_p64", + "vld2_lane_u64", + "vld2q_dup_p64", + "vld2q_dup_p64", + "vld2q_dup_u64", + "vld2q_dup_u64", + "vld2q_f64", + "vld2q_s64", + "vld2q_lane_f64", + "vld2q_lane_s8", + "vld2q_lane_s64", + "vld2q_lane_p64", + "vld2q_lane_u8", + "vld2q_lane_u64", + "vld2q_lane_p8", + "vld2q_p64", + "vld2q_p64", + "vld2q_u64", + "vld3_dup_f64", + "vld3q_dup_f64", + "vld3q_dup_s64", + "vld3_f64", + "vld3_lane_f64", + "vld3_lane_p64", + "vld3_lane_s64", + "vld3_lane_u64", + "vld3q_dup_p64", + "vld3q_dup_p64", + "vld3q_dup_u64", + "vld3q_dup_u64", + "vld3q_f64", + "vld3q_s64", + "vld3q_lane_f64", + "vld3q_lane_p64", + "vld3q_lane_s8", + 
"vld3q_lane_s64", + "vld3q_lane_u8", + "vld3q_lane_u64", + "vld3q_lane_p8", + "vld3q_p64", + "vld3q_p64", + "vld3q_u64", + "vld4_dup_f64", + "vld4q_dup_f64", + "vld4q_dup_s64", + "vld4_f64", + "vld4_lane_f64", + "vld4_lane_s64", + "vld4_lane_p64", + "vld4_lane_u64", + "vld4q_dup_p64", + "vld4q_dup_p64", + "vld4q_dup_u64", + "vld4q_dup_u64", + "vld4q_f64", + "vld4q_s64", + "vld4q_lane_f64", + "vld4q_lane_s8", + "vld4q_lane_s64", + "vld4q_lane_p64", + "vld4q_lane_u8", + "vld4q_lane_u64", + "vld4q_lane_p8", + "vld4q_p64", + "vld4q_p64", + "vld4q_u64", + "vldap1_lane_s64", + "vldap1q_lane_s64", + "vldap1q_lane_f64", + "vldap1_lane_u64", + "vldap1q_lane_u64", + "vldap1_lane_p64", + "vldap1q_lane_p64", + "vluti2_lane_f16", + "vluti2q_lane_f16", + "vluti2_lane_u8", + "vluti2q_lane_u8", + "vluti2_lane_u16", + "vluti2q_lane_u16", + "vluti2_lane_p8", + "vluti2q_lane_p8", + "vluti2_lane_p16", + "vluti2q_lane_p16", + "vluti2_lane_s8", + "vluti2q_lane_s8", + "vluti2_lane_s16", + "vluti2q_lane_s16", + "vluti2_laneq_f16", + "vluti2q_laneq_f16", + "vluti2_laneq_u8", + "vluti2q_laneq_u8", + "vluti2_laneq_u16", + "vluti2q_laneq_u16", + "vluti2_laneq_p8", + "vluti2q_laneq_p8", + "vluti2_laneq_p16", + "vluti2q_laneq_p16", + "vluti2_laneq_s8", + "vluti2q_laneq_s8", + "vluti2_laneq_s16", + "vluti2q_laneq_s16", + "vluti4q_lane_f16_x2", + "vluti4q_lane_u16_x2", + "vluti4q_lane_p16_x2", + "vluti4q_lane_s16_x2", + "vluti4q_lane_s8", + "vluti4q_lane_u8", + "vluti4q_lane_p8", + "vluti4q_laneq_f16_x2", + "vluti4q_laneq_u16_x2", + "vluti4q_laneq_p16_x2", + "vluti4q_laneq_s16_x2", + "vluti4q_laneq_s8", + "vluti4q_laneq_u8", + "vluti4q_laneq_p8", + "vmax_f64", + "vmaxq_f64", + "vmaxh_f16", + "vmaxnm_f64", + "vmaxnmq_f64", + "vmaxnmh_f16", + "vmaxnmv_f16", + "vmaxnmvq_f16", + "vmaxnmv_f32", + "vmaxnmvq_f64", + "vmaxnmvq_f32", + "vmaxv_f16", + "vmaxvq_f16", + "vmaxv_f32", + "vmaxvq_f32", + "vmaxvq_f64", + "vmaxv_s8", + "vmaxvq_s8", + "vmaxv_s16", + "vmaxvq_s16", + "vmaxv_s32", + "vmaxvq_s32", + 
"vmaxv_u8", + "vmaxvq_u8", + "vmaxv_u16", + "vmaxvq_u16", + "vmaxv_u32", + "vmaxvq_u32", + "vmin_f64", + "vminq_f64", + "vminh_f16", + "vminnm_f64", + "vminnmq_f64", + "vminnmh_f16", + "vminnmv_f16", + "vminnmvq_f16", + "vminnmv_f32", + "vminnmvq_f64", + "vminnmvq_f32", + "vminv_f16", + "vminvq_f16", + "vminv_f32", + "vminvq_f32", + "vminvq_f64", + "vminv_s8", + "vminvq_s8", + "vminv_s16", + "vminvq_s16", + "vminv_s32", + "vminvq_s32", + "vminv_u8", + "vminvq_u8", + "vminv_u16", + "vminvq_u16", + "vminv_u32", + "vminvq_u32", + "vmla_f64", + "vmlaq_f64", + "vmlal_high_lane_s16", + "vmlal_high_laneq_s16", + "vmlal_high_lane_s32", + "vmlal_high_laneq_s32", + "vmlal_high_lane_u16", + "vmlal_high_laneq_u16", + "vmlal_high_lane_u32", + "vmlal_high_laneq_u32", + "vmlal_high_n_s16", + "vmlal_high_n_s32", + "vmlal_high_n_u16", + "vmlal_high_n_u32", + "vmlal_high_s8", + "vmlal_high_s16", + "vmlal_high_s32", + "vmlal_high_u8", + "vmlal_high_u16", + "vmlal_high_u32", + "vmls_f64", + "vmlsq_f64", + "vmlsl_high_lane_s16", + "vmlsl_high_laneq_s16", + "vmlsl_high_lane_s32", + "vmlsl_high_laneq_s32", + "vmlsl_high_lane_u16", + "vmlsl_high_laneq_u16", + "vmlsl_high_lane_u32", + "vmlsl_high_laneq_u32", + "vmlsl_high_n_s16", + "vmlsl_high_n_s32", + "vmlsl_high_n_u16", + "vmlsl_high_n_u32", + "vmlsl_high_s8", + "vmlsl_high_s16", + "vmlsl_high_s32", + "vmlsl_high_u8", + "vmlsl_high_u16", + "vmlsl_high_u32", + "vmovl_high_s8", + "vmovl_high_s16", + "vmovl_high_s32", + "vmovl_high_u8", + "vmovl_high_u16", + "vmovl_high_u32", + "vmovn_high_s16", + "vmovn_high_s32", + "vmovn_high_s64", + "vmovn_high_u16", + "vmovn_high_u32", + "vmovn_high_u64", + "vmul_f64", + "vmulq_f64", + "vmul_lane_f64", + "vmul_laneq_f16", + "vmulq_laneq_f16", + "vmul_laneq_f64", + "vmul_n_f64", + "vmulq_n_f64", + "vmuld_lane_f64", + "vmulh_f16", + "vmulh_lane_f16", + "vmulh_laneq_f16", + "vmull_high_lane_s16", + "vmull_high_laneq_s16", + "vmull_high_lane_s32", + "vmull_high_laneq_s32", + "vmull_high_lane_u16", + 
"vmull_high_laneq_u16", + "vmull_high_lane_u32", + "vmull_high_laneq_u32", + "vmull_high_n_s16", + "vmull_high_n_s32", + "vmull_high_n_u16", + "vmull_high_n_u32", + "vmull_high_p64", + "vmull_high_p8", + "vmull_high_s8", + "vmull_high_s16", + "vmull_high_s32", + "vmull_high_u8", + "vmull_high_u16", + "vmull_high_u32", + "vmull_p64", + "vmulq_lane_f64", + "vmulq_laneq_f64", + "vmuls_lane_f32", + "vmuls_laneq_f32", + "vmuld_laneq_f64", + "vmulx_f16", + "vmulxq_f16", + "vmulx_f32", + "vmulxq_f32", + "vmulx_f64", + "vmulxq_f64", + "vmulx_lane_f16", + "vmulx_laneq_f16", + "vmulxq_lane_f16", + "vmulxq_laneq_f16", + "vmulx_lane_f32", + "vmulx_laneq_f32", + "vmulxq_lane_f32", + "vmulxq_laneq_f32", + "vmulxq_laneq_f64", + "vmulx_lane_f64", + "vmulx_laneq_f64", + "vmulx_n_f16", + "vmulxq_n_f16", + "vmulxd_f64", + "vmulxs_f32", + "vmulxd_lane_f64", + "vmulxd_laneq_f64", + "vmulxs_lane_f32", + "vmulxs_laneq_f32", + "vmulxh_f16", + "vmulxh_lane_f16", + "vmulxh_laneq_f16", + "vmulxq_lane_f64", + "vneg_f64", + "vnegq_f64", + "vneg_s64", + "vnegq_s64", + "vnegd_s64", + "vnegh_f16", + "vpaddd_f64", + "vpadds_f32", + "vpaddd_s64", + "vpaddd_u64", + "vpaddq_f16", + "vpaddq_f32", + "vpaddq_f64", + "vpaddq_s8", + "vpaddq_s16", + "vpaddq_s32", + "vpaddq_s64", + "vpaddq_u8", + "vpaddq_u16", + "vpaddq_u32", + "vpaddq_u64", + "vpmax_f16", + "vpmaxq_f16", + "vpmaxnm_f16", + "vpmaxnmq_f16", + "vpmaxnm_f32", + "vpmaxnmq_f32", + "vpmaxnmq_f64", + "vpmaxnmqd_f64", + "vpmaxnms_f32", + "vpmaxq_s8", + "vpmaxq_s16", + "vpmaxq_s32", + "vpmaxq_u8", + "vpmaxq_u16", + "vpmaxq_u32", + "vpmaxqd_f64", + "vpmaxs_f32", + "vpmin_f16", + "vpminq_f16", + "vpminnm_f16", + "vpminnmq_f16", + "vpminnm_f32", + "vpminnmq_f32", + "vpminnmq_f64", + "vpminnmqd_f64", + "vpminnms_f32", + "vpminq_s8", + "vpminq_s16", + "vpminq_s32", + "vpminq_u8", + "vpminq_u16", + "vpminq_u32", + "vpminqd_f64", + "vpmins_f32", + "vqabs_s64", + "vqabsq_s64", + "vqabsb_s8", + "vqabsh_s16", + "vqabss_s32", + "vqabsd_s64", + "vqaddb_s8", + 
"vqaddh_s16", + "vqaddb_u8", + "vqaddh_u16", + "vqadds_s32", + "vqaddd_s64", + "vqadds_u32", + "vqaddd_u64", + "vqdmlal_high_lane_s16", + "vqdmlal_high_laneq_s16", + "vqdmlal_high_lane_s32", + "vqdmlal_high_laneq_s32", + "vqdmlal_high_n_s16", + "vqdmlal_high_s16", + "vqdmlal_high_n_s32", + "vqdmlal_high_s32", + "vqdmlal_laneq_s16", + "vqdmlal_laneq_s32", + "vqdmlalh_lane_s16", + "vqdmlalh_laneq_s16", + "vqdmlals_lane_s32", + "vqdmlals_laneq_s32", + "vqdmlalh_s16", + "vqdmlals_s32", + "vqdmlsl_high_lane_s16", + "vqdmlsl_high_laneq_s16", + "vqdmlsl_high_lane_s32", + "vqdmlsl_high_laneq_s32", + "vqdmlsl_high_n_s16", + "vqdmlsl_high_s16", + "vqdmlsl_high_n_s32", + "vqdmlsl_high_s32", + "vqdmlsl_laneq_s16", + "vqdmlsl_laneq_s32", + "vqdmlslh_lane_s16", + "vqdmlslh_laneq_s16", + "vqdmlsls_lane_s32", + "vqdmlsls_laneq_s32", + "vqdmlslh_s16", + "vqdmlsls_s32", + "vqdmulh_lane_s16", + "vqdmulhq_lane_s16", + "vqdmulh_lane_s32", + "vqdmulhq_lane_s32", + "vqdmulhh_lane_s16", + "vqdmulhh_laneq_s16", + "vqdmulhh_s16", + "vqdmulhs_s32", + "vqdmulhs_lane_s32", + "vqdmulhs_laneq_s32", + "vqdmull_high_lane_s16", + "vqdmull_high_laneq_s32", + "vqdmull_high_lane_s32", + "vqdmull_high_laneq_s16", + "vqdmull_high_n_s16", + "vqdmull_high_n_s32", + "vqdmull_high_s16", + "vqdmull_high_s32", + "vqdmull_laneq_s16", + "vqdmull_laneq_s32", + "vqdmullh_lane_s16", + "vqdmulls_laneq_s32", + "vqdmullh_laneq_s16", + "vqdmullh_s16", + "vqdmulls_lane_s32", + "vqdmulls_s32", + "vqmovn_high_s16", + "vqmovn_high_s32", + "vqmovn_high_s64", + "vqmovn_high_u16", + "vqmovn_high_u32", + "vqmovn_high_u64", + "vqmovnd_s64", + "vqmovnd_u64", + "vqmovnh_s16", + "vqmovns_s32", + "vqmovnh_u16", + "vqmovns_u32", + "vqmovun_high_s16", + "vqmovun_high_s32", + "vqmovun_high_s64", + "vqmovunh_s16", + "vqmovuns_s32", + "vqmovund_s64", + "vqneg_s64", + "vqnegq_s64", + "vqnegb_s8", + "vqnegh_s16", + "vqnegs_s32", + "vqnegd_s64", + "vqrdmlah_lane_s16", + "vqrdmlah_lane_s32", + "vqrdmlah_laneq_s16", + "vqrdmlah_laneq_s32", 
+ "vqrdmlahq_lane_s16", + "vqrdmlahq_lane_s32", + "vqrdmlahq_laneq_s16", + "vqrdmlahq_laneq_s32", + "vqrdmlah_s16", + "vqrdmlahq_s16", + "vqrdmlah_s32", + "vqrdmlahq_s32", + "vqrdmlahh_lane_s16", + "vqrdmlahh_laneq_s16", + "vqrdmlahs_lane_s32", + "vqrdmlahs_laneq_s32", + "vqrdmlahh_s16", + "vqrdmlahs_s32", + "vqrdmlsh_lane_s16", + "vqrdmlsh_lane_s32", + "vqrdmlsh_laneq_s16", + "vqrdmlsh_laneq_s32", + "vqrdmlshq_lane_s16", + "vqrdmlshq_lane_s32", + "vqrdmlshq_laneq_s16", + "vqrdmlshq_laneq_s32", + "vqrdmlsh_s16", + "vqrdmlshq_s16", + "vqrdmlsh_s32", + "vqrdmlshq_s32", + "vqrdmlshh_lane_s16", + "vqrdmlshh_laneq_s16", + "vqrdmlshs_lane_s32", + "vqrdmlshs_laneq_s32", + "vqrdmlshh_s16", + "vqrdmlshs_s32", + "vqrdmulhh_lane_s16", + "vqrdmulhh_laneq_s16", + "vqrdmulhs_lane_s32", + "vqrdmulhs_laneq_s32", + "vqrdmulhh_s16", + "vqrdmulhs_s32", + "vqrshlb_s8", + "vqrshlh_s16", + "vqrshlb_u8", + "vqrshlh_u16", + "vqrshld_s64", + "vqrshls_s32", + "vqrshls_u32", + "vqrshld_u64", + "vqrshrn_high_n_s16", + "vqrshrn_high_n_s32", + "vqrshrn_high_n_s64", + "vqrshrn_high_n_u16", + "vqrshrn_high_n_u32", + "vqrshrn_high_n_u64", + "vqrshrnd_n_u64", + "vqrshrnh_n_u16", + "vqrshrns_n_u32", + "vqrshrnh_n_s16", + "vqrshrns_n_s32", + "vqrshrnd_n_s64", + "vqrshrun_high_n_s16", + "vqrshrun_high_n_s32", + "vqrshrun_high_n_s64", + "vqrshrund_n_s64", + "vqrshrunh_n_s16", + "vqrshruns_n_s32", + "vqshlb_n_s8", + "vqshld_n_s64", + "vqshlh_n_s16", + "vqshls_n_s32", + "vqshlb_n_u8", + "vqshld_n_u64", + "vqshlh_n_u16", + "vqshls_n_u32", + "vqshlb_s8", + "vqshlh_s16", + "vqshls_s32", + "vqshlb_u8", + "vqshlh_u16", + "vqshls_u32", + "vqshld_s64", + "vqshld_u64", + "vqshlub_n_s8", + "vqshlud_n_s64", + "vqshluh_n_s16", + "vqshlus_n_s32", + "vqshrn_high_n_s16", + "vqshrn_high_n_s32", + "vqshrn_high_n_s64", + "vqshrn_high_n_u16", + "vqshrn_high_n_u32", + "vqshrn_high_n_u64", + "vqshrnd_n_s64", + "vqshrnd_n_u64", + "vqshrnh_n_s16", + "vqshrns_n_s32", + "vqshrnh_n_u16", + "vqshrns_n_u32", + 
"vqshrun_high_n_s16", + "vqshrun_high_n_s32", + "vqshrun_high_n_s64", + "vqshrund_n_s64", + "vqshrunh_n_s16", + "vqshruns_n_s32", + "vqsubb_s8", + "vqsubh_s16", + "vqsubb_u8", + "vqsubh_u16", + "vqsubs_s32", + "vqsubd_s64", + "vqsubs_u32", + "vqsubd_u64", + "vrax1q_u64", + "vrbit_s8", + "vrbitq_s8", + "vrbit_u8", + "vrbit_u8", + "vrbitq_u8", + "vrbitq_u8", + "vrbit_p8", + "vrbit_p8", + "vrbitq_p8", + "vrbitq_p8", + "vrecpe_f64", + "vrecpeq_f64", + "vrecped_f64", + "vrecpes_f32", + "vrecpeh_f16", + "vrecps_f64", + "vrecpsq_f64", + "vrecpsd_f64", + "vrecpss_f32", + "vrecpsh_f16", + "vrecpxd_f64", + "vrecpxs_f32", + "vrecpxh_f16", + "vreinterpret_f64_f16", + "vreinterpret_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpret_f16_f64", + "vreinterpret_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f64_p128", + "vreinterpretq_f64_p128", + "vreinterpret_f64_f32", + "vreinterpret_f64_f32", + "vreinterpret_p64_f32", + "vreinterpret_p64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_p64_f32", + "vreinterpretq_p64_f32", + "vreinterpret_f32_f64", + "vreinterpret_f32_f64", + "vreinterpret_s8_f64", + "vreinterpret_s8_f64", + "vreinterpret_s16_f64", + "vreinterpret_s16_f64", + "vreinterpret_s32_f64", + "vreinterpret_s32_f64", + "vreinterpret_s64_f64", + "vreinterpret_u8_f64", + "vreinterpret_u8_f64", + "vreinterpret_u16_f64", + "vreinterpret_u16_f64", + "vreinterpret_u32_f64", + "vreinterpret_u32_f64", + "vreinterpret_u64_f64", + "vreinterpret_p8_f64", + "vreinterpret_p8_f64", + "vreinterpret_p16_f64", + "vreinterpret_p16_f64", + "vreinterpret_p64_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s64_f64", + "vreinterpretq_s64_f64", + 
"vreinterpretq_u8_f64", + "vreinterpretq_u8_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p64_f64", + "vreinterpretq_p64_f64", + "vreinterpret_f64_s8", + "vreinterpret_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpret_f64_s16", + "vreinterpret_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpret_f64_s32", + "vreinterpret_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpret_f64_s64", + "vreinterpret_p64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_p64_s64", + "vreinterpretq_p64_s64", + "vreinterpret_f64_u8", + "vreinterpret_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpret_f64_u16", + "vreinterpret_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpret_f64_u32", + "vreinterpret_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpret_f64_u64", + "vreinterpret_p64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_p64_u64", + "vreinterpretq_p64_u64", + "vreinterpret_f64_p8", + "vreinterpret_f64_p8", + "vreinterpretq_f64_p8", + "vreinterpretq_f64_p8", + "vreinterpret_f64_p16", + "vreinterpret_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpret_f32_p64", + "vreinterpret_f32_p64", + "vreinterpret_f64_p64", + "vreinterpret_s64_p64", + "vreinterpret_u64_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_u64_p64", + "vreinterpretq_u64_p64", + "vrnd32x_f32", + "vrnd32xq_f32", + "vrnd32xq_f64", + "vrnd32x_f64", + "vrnd32z_f32", + "vrnd32zq_f32", + "vrnd32zq_f64", + 
"vrnd32z_f64", + "vrnd64x_f32", + "vrnd64xq_f32", + "vrnd64xq_f64", + "vrnd64x_f64", + "vrnd64z_f32", + "vrnd64zq_f32", + "vrnd64zq_f64", + "vrnd64z_f64", + "vrnd_f16", + "vrndq_f16", + "vrnd_f32", + "vrndq_f32", + "vrnd_f64", + "vrndq_f64", + "vrnda_f16", + "vrndaq_f16", + "vrnda_f32", + "vrndaq_f32", + "vrnda_f64", + "vrndaq_f64", + "vrndah_f16", + "vrndh_f16", + "vrndi_f16", + "vrndiq_f16", + "vrndi_f32", + "vrndiq_f32", + "vrndi_f64", + "vrndiq_f64", + "vrndih_f16", + "vrndm_f16", + "vrndmq_f16", + "vrndm_f32", + "vrndmq_f32", + "vrndm_f64", + "vrndmq_f64", + "vrndmh_f16", + "vrndn_f64", + "vrndnq_f64", + "vrndnh_f16", + "vrndns_f32", + "vrndp_f16", + "vrndpq_f16", + "vrndp_f32", + "vrndpq_f32", + "vrndp_f64", + "vrndpq_f64", + "vrndph_f16", + "vrndx_f16", + "vrndxq_f16", + "vrndx_f32", + "vrndxq_f32", + "vrndx_f64", + "vrndxq_f64", + "vrndxh_f16", + "vrshld_s64", + "vrshld_u64", + "vrshrd_n_s64", + "vrshrd_n_u64", + "vrshrn_high_n_s16", + "vrshrn_high_n_s32", + "vrshrn_high_n_s64", + "vrshrn_high_n_u16", + "vrshrn_high_n_u32", + "vrshrn_high_n_u64", + "vrsqrte_f64", + "vrsqrteq_f64", + "vrsqrted_f64", + "vrsqrtes_f32", + "vrsqrteh_f16", + "vrsqrts_f64", + "vrsqrtsq_f64", + "vrsqrtsd_f64", + "vrsqrtss_f32", + "vrsqrtsh_f16", + "vrsrad_n_s64", + "vrsrad_n_u64", + "vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + "vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + "vscale_f16", + "vscaleq_f16", + "vscale_f32", + "vscaleq_f32", + "vscaleq_f64", + "vset_lane_f64", + "vsetq_lane_f64", + "vsha512h2q_u64", + "vsha512hq_u64", + "vsha512su0q_u64", + "vsha512su1q_u64", + "vshld_s64", + "vshld_u64", + "vshll_high_n_s8", + "vshll_high_n_s16", + "vshll_high_n_s32", + "vshll_high_n_u8", + "vshll_high_n_u16", + "vshll_high_n_u32", + "vshrn_high_n_s16", + "vshrn_high_n_s32", + "vshrn_high_n_s64", + "vshrn_high_n_u16", 
+ "vshrn_high_n_u32", + "vshrn_high_n_u64", + "vslid_n_s64", + "vslid_n_u64", + "vsm3partw1q_u32", + "vsm3partw2q_u32", + "vsm3ss1q_u32", + "vsm3tt1aq_u32", + "vsm3tt1bq_u32", + "vsm3tt2aq_u32", + "vsm3tt2bq_u32", + "vsm4ekeyq_u32", + "vsm4eq_u32", + "vsqadd_u8", + "vsqaddq_u8", + "vsqadd_u16", + "vsqaddq_u16", + "vsqadd_u32", + "vsqaddq_u32", + "vsqadd_u64", + "vsqaddq_u64", + "vsqaddb_u8", + "vsqaddh_u16", + "vsqaddd_u64", + "vsqadds_u32", + "vsqrt_f16", + "vsqrtq_f16", + "vsqrt_f32", + "vsqrtq_f32", + "vsqrt_f64", + "vsqrtq_f64", + "vsqrth_f16", + "vsrid_n_s64", + "vsrid_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f64_x2", + "vst1q_f64_x2", + "vst1_f64_x3", + "vst1q_f64_x3", + "vst1_f64_x4", + "vst1q_f64_x4", + "vst1_lane_f64", + "vst1q_lane_f64", + "vst2_f64", + "vst2_lane_f64", + "vst2_lane_s64", + "vst2_lane_p64", + "vst2_lane_u64", + "vst2q_f64", + "vst2q_s64", + "vst2q_lane_f64", + "vst2q_lane_s8", + "vst2q_lane_s64", + "vst2q_lane_p64", + "vst2q_lane_u8", + "vst2q_lane_u64", + "vst2q_lane_p8", + "vst2q_p64", + "vst2q_u64", + "vst3_f64", + "vst3_lane_f64", + "vst3_lane_s64", + "vst3_lane_p64", + "vst3_lane_u64", + "vst3q_f64", + "vst3q_s64", + "vst3q_lane_f64", + "vst3q_lane_s8", + "vst3q_lane_s64", + "vst3q_lane_p64", + "vst3q_lane_u8", + "vst3q_lane_u64", + "vst3q_lane_p8", + "vst3q_p64", + "vst3q_u64", + "vst4_f64", + "vst4_lane_f64", + "vst4_lane_s64", + "vst4_lane_p64", + "vst4_lane_u64", + "vst4q_f64", + "vst4q_s64", + "vst4q_lane_f64", + "vst4q_lane_s8", + "vst4q_lane_s64", + "vst4q_lane_p64", + "vst4q_lane_u8", + "vst4q_lane_u64", + "vst4q_lane_p8", + "vst4q_p64", + "vst4q_u64", + "vstl1_lane_f64", + "vstl1q_lane_f64", + "vstl1_lane_u64", + "vstl1q_lane_u64", + "vstl1_lane_p64", + "vstl1q_lane_p64", + "vstl1_lane_s64", + "vstl1q_lane_s64", + "vsub_f64", + "vsubq_f64", + "vsubd_s64", + "vsubd_u64", + "vsubh_f16", + "vsubl_high_s8", + "vsubl_high_s16", + "vsubl_high_s32", + "vsubl_high_u8", + "vsubl_high_u16", + "vsubl_high_u32", + "vsubw_high_s8", + 
"vsubw_high_s16", + "vsubw_high_s32", + "vsubw_high_u8", + "vsubw_high_u16", + "vsubw_high_u32", + "vtrn1_f16", + "vtrn1q_f16", + "vtrn1_f32", + "vtrn1q_f64", + "vtrn1_s32", + "vtrn1q_s64", + "vtrn1_u32", + "vtrn1q_u64", + "vtrn1q_p64", + "vtrn1q_f32", + "vtrn1_s8", + "vtrn1q_s8", + "vtrn1_s16", + "vtrn1q_s16", + "vtrn1q_s32", + "vtrn1_u8", + "vtrn1q_u8", + "vtrn1_u16", + "vtrn1q_u16", + "vtrn1q_u32", + "vtrn1_p8", + "vtrn1q_p8", + "vtrn1_p16", + "vtrn1q_p16", + "vtrn2_f16", + "vtrn2q_f16", + "vtrn2_f32", + "vtrn2q_f64", + "vtrn2_s32", + "vtrn2q_s64", + "vtrn2_u32", + "vtrn2q_u64", + "vtrn2q_p64", + "vtrn2q_f32", + "vtrn2_s8", + "vtrn2q_s8", + "vtrn2_s16", + "vtrn2q_s16", + "vtrn2q_s32", + "vtrn2_u8", + "vtrn2q_u8", + "vtrn2_u16", + "vtrn2q_u16", + "vtrn2q_u32", + "vtrn2_p8", + "vtrn2q_p8", + "vtrn2_p16", + "vtrn2q_p16", + "vtst_s64", + "vtstq_s64", + "vtst_p64", + "vtstq_p64", + "vtst_u64", + "vtstq_u64", + "vtstd_s64", + "vtstd_u64", + "vuqadd_s8", + "vuqaddq_s8", + "vuqadd_s16", + "vuqaddq_s16", + "vuqadd_s32", + "vuqaddq_s32", + "vuqadd_s64", + "vuqaddq_s64", + "vuqaddb_s8", + "vuqaddh_s16", + "vuqaddd_s64", + "vuqadds_s32", + "vuzp1_f16", + "vuzp1q_f16", + "vuzp1_f32", + "vuzp1q_f64", + "vuzp1_s32", + "vuzp1q_s64", + "vuzp1_u32", + "vuzp1q_u64", + "vuzp1q_p64", + "vuzp1q_f32", + "vuzp1_s8", + "vuzp1q_s8", + "vuzp1_s16", + "vuzp1q_s16", + "vuzp1q_s32", + "vuzp1_u8", + "vuzp1q_u8", + "vuzp1_u16", + "vuzp1q_u16", + "vuzp1q_u32", + "vuzp1_p8", + "vuzp1q_p8", + "vuzp1_p16", + "vuzp1q_p16", + "vuzp2_f16", + "vuzp2q_f16", + "vuzp2_f32", + "vuzp2q_f64", + "vuzp2_s32", + "vuzp2q_s64", + "vuzp2_u32", + "vuzp2q_u64", + "vuzp2q_p64", + "vuzp2q_f32", + "vuzp2_s8", + "vuzp2q_s8", + "vuzp2_s16", + "vuzp2q_s16", + "vuzp2q_s32", + "vuzp2_u8", + "vuzp2q_u8", + "vuzp2_u16", + "vuzp2q_u16", + "vuzp2q_u32", + "vuzp2_p8", + "vuzp2q_p8", + "vuzp2_p16", + "vuzp2q_p16", + "vxarq_u64", + "vzip1_f16", + "vzip1q_f16", + "vzip1_f32", + "vzip1q_f32", + "vzip1q_f64", + "vzip1_s8", + 
"vzip1q_s8", + "vzip1_s16", + "vzip1q_s16", + "vzip1_s32", + "vzip1q_s32", + "vzip1q_s64", + "vzip1_u8", + "vzip1q_u8", + "vzip1_u16", + "vzip1q_u16", + "vzip1_u32", + "vzip1q_u32", + "vzip1q_u64", + "vzip1_p8", + "vzip1q_p8", + "vzip1_p16", + "vzip1q_p16", + "vzip1q_p64", + "vzip2_f16", + "vzip2q_f16", + "vzip2_f32", + "vzip2q_f32", + "vzip2q_f64", + "vzip2_s8", + "vzip2q_s8", + "vzip2_s16", + "vzip2q_s16", + "vzip2_s32", + "vzip2q_s32", + "vzip2q_s64", + "vzip2_u8", + "vzip2q_u8", + "vzip2_u16", + "vzip2q_u16", + "vzip2_u32", + "vzip2q_u32", + "vzip2q_u64", + "vzip2_p8", + "vzip2q_p8", + "vzip2_p16", + "vzip2q_p16", + "vzip2q_p64", + "__crc32b", + "__crc32cb", + "__crc32cd", + "__crc32ch", + "__crc32cw", + "__crc32d", + "__crc32h", + "__crc32w", + "vabal_s8", + "vabal_s16", + "vabal_s32", + "vabal_u8", + "vabal_u16", + "vabal_u32", + "vabd_f16", + "vabdq_f16", + "vabd_f32", + "vabdq_f32", + "vabd_s8", + "vabdq_s8", + "vabd_s16", + "vabdq_s16", + "vabd_s32", + "vabdq_s32", + "vabd_u8", + "vabdq_u8", + "vabd_u16", + "vabdq_u16", + "vabd_u32", + "vabdq_u32", + "vabdl_s8", + "vabdl_s16", + "vabdl_s32", + "vabdl_u8", + "vabdl_u16", + "vabdl_u32", + "vabs_f16", + "vabsq_f16", + "vabs_f32", + "vabsq_f32", + "vabs_s8", + "vabsq_s8", + "vabs_s16", + "vabsq_s16", + "vabs_s32", + "vabsq_s32", + "vabsh_f16", + "vadd_f16", + "vaddq_f16", + "vadd_p8", + "vaddq_p8", + "vadd_p16", + "vaddq_p16", + "vadd_p64", + "vaddq_p64", + "vaddh_f16", + "vaddhn_high_s16", + "vaddhn_high_s32", + "vaddhn_high_s64", + "vaddhn_high_u16", + "vaddhn_high_u32", + "vaddhn_high_u64", + "vaddhn_s16", + "vaddhn_s32", + "vaddhn_s64", + "vaddhn_u16", + "vaddhn_u32", + "vaddhn_u64", + "vaddq_p128", + "vaesdq_u8", + "vaeseq_u8", + "vaesimcq_u8", + "vaesmcq_u8", + "vbsl_f16", + "vbslq_f16", + "vcage_f16", + "vcageq_f16", + "vcage_f32", + "vcageq_f32", + "vcagt_f16", + "vcagtq_f16", + "vcagt_f32", + "vcagtq_f32", + "vcale_f16", + "vcaleq_f16", + "vcale_f32", + "vcaleq_f32", + "vcalt_f16", + "vcaltq_f16", + 
"vcalt_f32", + "vcaltq_f32", + "vceq_f16", + "vceqq_f16", + "vceq_p8", + "vceqq_p8", + "vcge_f16", + "vcgeq_f16", + "vcgez_f16", + "vcgezq_f16", + "vcgt_f16", + "vcgtq_f16", + "vcgtz_f16", + "vcgtzq_f16", + "vcle_f16", + "vcleq_f16", + "vclez_f16", + "vclezq_f16", + "vcls_s8", + "vclsq_s8", + "vcls_s16", + "vclsq_s16", + "vcls_s32", + "vclsq_s32", + "vcls_u8", + "vclsq_u8", + "vcls_u16", + "vclsq_u16", + "vcls_u32", + "vclsq_u32", + "vclt_f16", + "vcltq_f16", + "vcltz_f16", + "vcltzq_f16", + "vclz_s8", + "vclzq_s8", + "vclz_s16", + "vclzq_s16", + "vclz_s32", + "vclzq_s32", + "vclz_u16", + "vclz_u16", + "vclzq_u16", + "vclzq_u16", + "vclz_u32", + "vclz_u32", + "vclzq_u32", + "vclzq_u32", + "vclz_u8", + "vclz_u8", + "vclzq_u8", + "vclzq_u8", + "vcnt_s8", + "vcntq_s8", + "vcnt_u8", + "vcnt_u8", + "vcntq_u8", + "vcntq_u8", + "vcnt_p8", + "vcnt_p8", + "vcntq_p8", + "vcntq_p8", + "vcombine_f16", + "vcreate_f16", + "vcreate_f16", + "vcreate_f32", + "vcreate_f32", + "vcreate_s8", + "vcreate_s8", + "vcreate_s16", + "vcreate_s16", + "vcreate_s32", + "vcreate_s32", + "vcreate_s64", + "vcreate_u8", + "vcreate_u8", + "vcreate_u16", + "vcreate_u16", + "vcreate_u32", + "vcreate_u32", + "vcreate_u64", + "vcreate_p8", + "vcreate_p8", + "vcreate_p16", + "vcreate_p16", + "vcreate_p64", + "vcvt_f16_f32", + "vcvt_f16_s16", + "vcvtq_f16_s16", + "vcvt_f16_u16", + "vcvtq_f16_u16", + "vcvt_f32_f16", + "vcvt_f32_s32", + "vcvtq_f32_s32", + "vcvt_f32_u32", + "vcvtq_f32_u32", + "vcvt_n_f16_s16", + "vcvtq_n_f16_s16", + "vcvt_n_f16_u16", + "vcvtq_n_f16_u16", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_s16_f16", + "vcvtq_n_s16_f16", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_u16_f16", + "vcvtq_n_u16_f16", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_s16_f16", + 
"vcvtq_s16_f16", + "vcvt_s32_f32", + "vcvtq_s32_f32", + "vcvt_u16_f16", + "vcvtq_u16_f16", + "vcvt_u32_f32", + "vcvtq_u32_f32", + "vdot_lane_s32", + "vdot_lane_s32", + "vdotq_lane_s32", + "vdotq_lane_s32", + "vdot_lane_u32", + "vdot_lane_u32", + "vdotq_lane_u32", + "vdotq_lane_u32", + "vdot_laneq_s32", + "vdot_laneq_s32", + "vdotq_laneq_s32", + "vdotq_laneq_s32", + "vdot_laneq_u32", + "vdot_laneq_u32", + "vdotq_laneq_u32", + "vdotq_laneq_u32", + "vdot_s32", + "vdotq_s32", + "vdot_u32", + "vdotq_u32", + "vdup_lane_f16", + "vdupq_lane_f16", + "vdup_lane_f32", + "vdup_lane_s32", + "vdup_lane_u32", + "vdupq_lane_f32", + "vdupq_lane_s32", + "vdupq_lane_u32", + "vdup_lane_p16", + "vdup_lane_s16", + "vdup_lane_u16", + "vdupq_lane_p16", + "vdupq_lane_s16", + "vdupq_lane_u16", + "vdup_lane_p8", + "vdup_lane_s8", + "vdup_lane_u8", + "vdupq_lane_p8", + "vdupq_lane_s8", + "vdupq_lane_u8", + "vdup_lane_s64", + "vdup_lane_u64", + "vdup_laneq_f16", + "vdupq_laneq_f16", + "vdup_laneq_f32", + "vdup_laneq_s32", + "vdup_laneq_u32", + "vdupq_laneq_f32", + "vdupq_laneq_s32", + "vdupq_laneq_u32", + "vdup_laneq_p16", + "vdup_laneq_s16", + "vdup_laneq_u16", + "vdupq_laneq_p16", + "vdupq_laneq_s16", + "vdupq_laneq_u16", + "vdup_laneq_p8", + "vdup_laneq_s8", + "vdup_laneq_u8", + "vdupq_laneq_p8", + "vdupq_laneq_s8", + "vdupq_laneq_u8", + "vdup_laneq_s64", + "vdup_laneq_u64", + "vdup_n_f16", + "vdupq_n_f16", + "vdupq_lane_s64", + "vdupq_lane_u64", + "vdupq_laneq_s64", + "vdupq_laneq_u64", + "vext_f16", + "vext_f32", + "vext_s32", + "vext_u32", + "vext_s8", + "vextq_s16", + "vext_u8", + "vextq_u16", + "vext_p8", + "vextq_p16", + "vextq_f16", + "vextq_f32", + "vext_s16", + "vextq_s32", + "vext_u16", + "vextq_u32", + "vext_p16", + "vextq_s64", + "vextq_u64", + "vextq_s8", + "vextq_u8", + "vextq_p8", + "vfma_f16", + "vfmaq_f16", + "vfma_f32", + "vfmaq_f32", + "vfma_n_f32", + "vfmaq_n_f32", + "vfms_f16", + "vfmsq_f16", + "vfms_f32", + "vfmsq_f32", + "vfms_n_f32", + "vfmsq_n_f32", + 
"vget_high_f16", + "vget_low_f16", + "vget_lane_f16", + "vgetq_lane_f16", + "vld1_dup_f16", + "vld1q_dup_f16", + "vld1_f16", + "vld1_f16", + "vld1q_f16", + "vld1q_f16", + "vld1_f16_x2", + "vld1_f16_x3", + "vld1_f16_x4", + "vld1q_f16_x2", + "vld1q_f16_x3", + "vld1q_f16_x4", + "vld1_f32_x2", + "vld1_f32_x3", + "vld1_f32_x4", + "vld1q_f32_x2", + "vld1q_f32_x3", + "vld1q_f32_x4", + "vld1_lane_f16", + "vld1q_lane_f16", + "vld1_p64_x2", + "vld1_p64_x3", + "vld1_p64_x4", + "vld1q_p64_x2", + "vld1q_p64_x3", + "vld1q_p64_x4", + "vld1_s8_x2", + "vld1_s8_x3", + "vld1_s8_x4", + "vld1q_s8_x2", + "vld1q_s8_x3", + "vld1q_s8_x4", + "vld1_s16_x2", + "vld1_s16_x3", + "vld1_s16_x4", + "vld1q_s16_x2", + "vld1q_s16_x3", + "vld1q_s16_x4", + "vld1_s32_x2", + "vld1_s32_x3", + "vld1_s32_x4", + "vld1q_s32_x2", + "vld1q_s32_x3", + "vld1q_s32_x4", + "vld1_s64_x2", + "vld1_s64_x3", + "vld1_s64_x4", + "vld1q_s64_x2", + "vld1q_s64_x3", + "vld1q_s64_x4", + "vld1_u8_x2", + "vld1_u8_x3", + "vld1_u8_x4", + "vld1q_u8_x2", + "vld1q_u8_x3", + "vld1q_u8_x4", + "vld1_u16_x2", + "vld1_u16_x3", + "vld1_u16_x4", + "vld1q_u16_x2", + "vld1q_u16_x3", + "vld1q_u16_x4", + "vld1_u32_x2", + "vld1_u32_x3", + "vld1_u32_x4", + "vld1q_u32_x2", + "vld1q_u32_x3", + "vld1q_u32_x4", + "vld1_u64_x2", + "vld1_u64_x3", + "vld1_u64_x4", + "vld1q_u64_x2", + "vld1q_u64_x3", + "vld1q_u64_x4", + "vld1_p8_x2", + "vld1_p8_x3", + "vld1_p8_x4", + "vld1q_p8_x2", + "vld1q_p8_x3", + "vld1q_p8_x4", + "vld1_p16_x2", + "vld1_p16_x3", + "vld1_p16_x4", + "vld1q_p16_x2", + "vld1q_p16_x3", + "vld1q_p16_x4", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_p64", + "vld2_dup_s64", + "vld2_dup_s64", + "vld2_dup_u64", + 
"vld2_dup_u8", + "vld2_dup_u8", + "vld2q_dup_u8", + "vld2q_dup_u8", + "vld2_dup_u16", + "vld2_dup_u16", + "vld2q_dup_u16", + "vld2q_dup_u16", + "vld2_dup_u32", + "vld2_dup_u32", + "vld2q_dup_u32", + "vld2q_dup_u32", + "vld2_dup_p8", + "vld2_dup_p8", + "vld2q_dup_p8", + "vld2q_dup_p8", + "vld2_dup_p16", + "vld2_dup_p16", + "vld2q_dup_p16", + "vld2q_dup_p16", + "vld2_f16", + "vld2q_f16", + "vld2_f16", + "vld2q_f16", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2q_lane_s16", + "vld2_lane_s32", + "vld2q_lane_s32", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2q_lane_s16", + "vld2q_lane_s32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2_lane_s32", + "vld2_lane_u8", + "vld2_lane_u16", + "vld2q_lane_u16", + "vld2_lane_u32", + "vld2q_lane_u32", + "vld2_lane_p8", + "vld2_lane_p16", + "vld2q_lane_p16", + "vld2_p64", + "vld2_s64", + "vld2_s64", + "vld2_u64", + "vld2_u8", + "vld2q_u8", + "vld2_u16", + "vld2q_u16", + "vld2_u32", + "vld2q_u32", + "vld2_p8", + "vld2q_p8", + "vld2_p16", + "vld2q_p16", + "vld3_dup_f16", + "vld3q_dup_f16", + "vld3_dup_f16", + "vld3q_dup_f16", + "vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_s64", + "vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_p64", + "vld3_dup_s64", + "vld3_dup_u64", + "vld3_dup_u8", + "vld3_dup_u8", + "vld3q_dup_u8", + "vld3q_dup_u8", + "vld3_dup_u16", + "vld3_dup_u16", + "vld3q_dup_u16", + "vld3q_dup_u16", + "vld3_dup_u32", + "vld3_dup_u32", + "vld3q_dup_u32", + "vld3q_dup_u32", 
+ "vld3_dup_p8", + "vld3_dup_p8", + "vld3q_dup_p8", + "vld3q_dup_p8", + "vld3_dup_p16", + "vld3_dup_p16", + "vld3q_dup_p16", + "vld3q_dup_p16", + "vld3_f16", + "vld3q_f16", + "vld3_f16", + "vld3q_f16", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f32", + "vld3q_lane_f32", + "vld3_lane_f32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_u8", + "vld3_lane_u16", + "vld3q_lane_u16", + "vld3_lane_u32", + "vld3q_lane_u32", + "vld3_lane_p8", + "vld3_lane_p16", + "vld3q_lane_p16", + "vld3_p64", + "vld3_s64", + "vld3_s64", + "vld3_u64", + "vld3_u8", + "vld3q_u8", + "vld3_u16", + "vld3q_u16", + "vld3_u32", + "vld3q_u32", + "vld3_p8", + "vld3q_p8", + "vld3_p16", + "vld3q_p16", + "vld3q_lane_f32", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", + "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", + "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_s64", + "vld4_dup_p64", + "vld4_dup_s64", + "vld4_dup_u64", + "vld4_dup_u8", + "vld4_dup_u8", + "vld4q_dup_u8", + "vld4q_dup_u8", + "vld4_dup_u16", + "vld4_dup_u16", + "vld4q_dup_u16", + "vld4q_dup_u16", + "vld4_dup_u32", + "vld4_dup_u32", + "vld4q_dup_u32", + "vld4q_dup_u32", + "vld4_dup_p8", + "vld4_dup_p8", + "vld4q_dup_p8", + "vld4q_dup_p8", + "vld4_dup_p16", + "vld4_dup_p16", + "vld4q_dup_p16", + "vld4q_dup_p16", + "vld4_f16", + "vld4q_f16", + "vld4_f16", + "vld4q_f16", + "vld4_f32", 
+ "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_f32", + "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_u8", + "vld4_lane_u16", + "vld4q_lane_u16", + "vld4_lane_u32", + "vld4q_lane_u32", + "vld4_lane_p8", + "vld4_lane_p16", + "vld4q_lane_p16", + "vld4_p64", + "vld4_s64", + "vld4_s64", + "vld4_u64", + "vld4_u8", + "vld4q_u8", + "vld4_u16", + "vld4q_u16", + "vld4_u32", + "vld4q_u32", + "vld4_p8", + "vld4q_p8", + "vld4_p16", + "vld4q_p16", + "vmax_f16", + "vmaxq_f16", + "vmax_f32", + "vmaxq_f32", + "vmax_s8", + "vmaxq_s8", + "vmax_s16", + "vmaxq_s16", + "vmax_s32", + "vmaxq_s32", + "vmax_u8", + "vmaxq_u8", + "vmax_u16", + "vmaxq_u16", + "vmax_u32", + "vmaxq_u32", + "vmaxnm_f16", + "vmaxnmq_f16", + "vmaxnm_f32", + "vmaxnmq_f32", + "vmin_f16", + "vminq_f16", + "vmin_f32", + "vminq_f32", + "vmin_s8", + "vminq_s8", + "vmin_s16", + "vminq_s16", + "vmin_s32", + "vminq_s32", + "vmin_u8", + "vminq_u8", + "vmin_u16", + "vminq_u16", + "vmin_u32", + "vminq_u32", + "vminnm_f16", + "vminnmq_f16", + "vminnm_f32", + "vminnmq_f32", + "vmla_f32", + "vmlaq_f32", + "vmla_lane_f32", + "vmla_laneq_f32", + "vmlaq_lane_f32", + "vmlaq_laneq_f32", + "vmla_lane_s16", + "vmla_lane_u16", + "vmla_laneq_s16", + "vmla_laneq_u16", + "vmlaq_lane_s16", + "vmlaq_lane_u16", + "vmlaq_laneq_s16", + "vmlaq_laneq_u16", + "vmla_lane_s32", + "vmla_lane_u32", + "vmla_laneq_s32", + "vmla_laneq_u32", + "vmlaq_lane_s32", + "vmlaq_lane_u32", + "vmlaq_laneq_s32", + "vmlaq_laneq_u32", + "vmla_n_f32", + "vmlaq_n_f32", + "vmla_n_s16", + "vmlaq_n_s16", + 
"vmla_n_u16", + "vmlaq_n_u16", + "vmla_n_s32", + "vmlaq_n_s32", + "vmla_n_u32", + "vmlaq_n_u32", + "vmla_s8", + "vmlaq_s8", + "vmla_s16", + "vmlaq_s16", + "vmla_s32", + "vmlaq_s32", + "vmla_u8", + "vmlaq_u8", + "vmla_u16", + "vmlaq_u16", + "vmla_u32", + "vmlaq_u32", + "vmlal_lane_s16", + "vmlal_laneq_s16", + "vmlal_lane_s32", + "vmlal_laneq_s32", + "vmlal_lane_u16", + "vmlal_laneq_u16", + "vmlal_lane_u32", + "vmlal_laneq_u32", + "vmlal_n_s16", + "vmlal_n_s32", + "vmlal_n_u16", + "vmlal_n_u32", + "vmlal_s8", + "vmlal_s16", + "vmlal_s32", + "vmlal_u8", + "vmlal_u16", + "vmlal_u32", + "vmls_f32", + "vmlsq_f32", + "vmls_lane_f32", + "vmls_laneq_f32", + "vmlsq_lane_f32", + "vmlsq_laneq_f32", + "vmls_lane_s16", + "vmls_lane_u16", + "vmls_laneq_s16", + "vmls_laneq_u16", + "vmlsq_lane_s16", + "vmlsq_lane_u16", + "vmlsq_laneq_s16", + "vmlsq_laneq_u16", + "vmls_lane_s32", + "vmls_lane_u32", + "vmls_laneq_s32", + "vmls_laneq_u32", + "vmlsq_lane_s32", + "vmlsq_lane_u32", + "vmlsq_laneq_s32", + "vmlsq_laneq_u32", + "vmls_n_f32", + "vmlsq_n_f32", + "vmls_n_s16", + "vmlsq_n_s16", + "vmls_n_u16", + "vmlsq_n_u16", + "vmls_n_s32", + "vmlsq_n_s32", + "vmls_n_u32", + "vmlsq_n_u32", + "vmls_s8", + "vmlsq_s8", + "vmls_s16", + "vmlsq_s16", + "vmls_s32", + "vmlsq_s32", + "vmls_u8", + "vmlsq_u8", + "vmls_u16", + "vmlsq_u16", + "vmls_u32", + "vmlsq_u32", + "vmlsl_lane_s16", + "vmlsl_laneq_s16", + "vmlsl_lane_s32", + "vmlsl_laneq_s32", + "vmlsl_lane_u16", + "vmlsl_laneq_u16", + "vmlsl_lane_u32", + "vmlsl_laneq_u32", + "vmlsl_n_s16", + "vmlsl_n_s32", + "vmlsl_n_u16", + "vmlsl_n_u32", + "vmlsl_s8", + "vmlsl_s16", + "vmlsl_s32", + "vmlsl_u8", + "vmlsl_u16", + "vmlsl_u32", + "vmmlaq_s32", + "vmmlaq_u32", + "vmov_n_f16", + "vmovq_n_f16", + "vmul_f16", + "vmulq_f16", + "vmul_lane_f16", + "vmulq_lane_f16", + "vmul_lane_f32", + "vmul_laneq_f32", + "vmulq_lane_f32", + "vmulq_laneq_f32", + "vmul_lane_s16", + "vmulq_lane_s16", + "vmul_lane_s32", + "vmulq_lane_s32", + "vmul_lane_u16", + 
"vmulq_lane_u16", + "vmul_lane_u32", + "vmulq_lane_u32", + "vmul_laneq_s16", + "vmulq_laneq_s16", + "vmul_laneq_s32", + "vmulq_laneq_s32", + "vmul_laneq_u16", + "vmulq_laneq_u16", + "vmul_laneq_u32", + "vmulq_laneq_u32", + "vmul_n_f16", + "vmulq_n_f16", + "vmul_n_f32", + "vmulq_n_f32", + "vmul_n_s16", + "vmulq_n_s16", + "vmul_n_s32", + "vmulq_n_s32", + "vmul_n_u16", + "vmulq_n_u16", + "vmul_n_u32", + "vmulq_n_u32", + "vmul_p8", + "vmulq_p8", + "vmull_lane_s16", + "vmull_laneq_s16", + "vmull_lane_s32", + "vmull_laneq_s32", + "vmull_lane_u16", + "vmull_laneq_u16", + "vmull_lane_u32", + "vmull_laneq_u32", + "vmull_n_s16", + "vmull_n_s32", + "vmull_n_u16", + "vmull_n_u32", + "vmull_p8", + "vmull_s16", + "vmull_s32", + "vmull_s8", + "vmull_u8", + "vmull_u16", + "vmull_u32", + "vneg_f16", + "vnegq_f16", + "vneg_f32", + "vnegq_f32", + "vneg_s8", + "vnegq_s8", + "vneg_s16", + "vnegq_s16", + "vneg_s32", + "vnegq_s32", + "vpadal_s8", + "vpadalq_s8", + "vpadal_s16", + "vpadalq_s16", + "vpadal_s32", + "vpadalq_s32", + "vpadal_u8", + "vpadalq_u8", + "vpadal_u16", + "vpadalq_u16", + "vpadal_u32", + "vpadalq_u32", + "vpadd_f16", + "vpadd_f32", + "vpadd_s8", + "vpadd_s16", + "vpadd_s32", + "vpadd_u8", + "vpadd_u8", + "vpadd_u16", + "vpadd_u16", + "vpadd_u32", + "vpadd_u32", + "vpaddl_s8", + "vpaddlq_s8", + "vpaddl_s16", + "vpaddlq_s16", + "vpaddl_s32", + "vpaddlq_s32", + "vpaddl_u8", + "vpaddlq_u8", + "vpaddl_u16", + "vpaddlq_u16", + "vpaddl_u32", + "vpaddlq_u32", + "vpmax_f32", + "vpmax_s8", + "vpmax_s16", + "vpmax_s32", + "vpmax_u8", + "vpmax_u16", + "vpmax_u32", + "vpmin_f32", + "vpmin_s8", + "vpmin_s16", + "vpmin_s32", + "vpmin_u8", + "vpmin_u16", + "vpmin_u32", + "vqabs_s8", + "vqabsq_s8", + "vqabs_s16", + "vqabsq_s16", + "vqabs_s32", + "vqabsq_s32", + "vqadd_s64", + "vqaddq_s64", + "vqadd_u64", + "vqaddq_u64", + "vqdmlal_lane_s16", + "vqdmlal_lane_s32", + "vqdmlal_n_s16", + "vqdmlal_n_s32", + "vqdmlal_s16", + "vqdmlal_s32", + "vqdmlsl_lane_s16", + "vqdmlsl_lane_s32", + 
"vqdmlsl_n_s16", + "vqdmlsl_n_s32", + "vqdmlsl_s16", + "vqdmlsl_s32", + "vqdmulh_laneq_s16", + "vqdmulhq_laneq_s16", + "vqdmulh_laneq_s32", + "vqdmulhq_laneq_s32", + "vqdmulh_n_s16", + "vqdmulhq_n_s16", + "vqdmulh_n_s32", + "vqdmulhq_n_s32", + "vqdmulh_s16", + "vqdmulhq_s16", + "vqdmulh_s32", + "vqdmulhq_s32", + "vqdmull_lane_s16", + "vqdmull_lane_s32", + "vqdmull_n_s16", + "vqdmull_n_s32", + "vqdmull_s16", + "vqdmull_s32", + "vqmovn_s16", + "vqmovn_s32", + "vqmovn_s64", + "vqmovn_u16", + "vqmovn_u32", + "vqmovn_u64", + "vqmovun_s16", + "vqmovun_s32", + "vqmovun_s64", + "vqneg_s8", + "vqnegq_s8", + "vqneg_s16", + "vqnegq_s16", + "vqneg_s32", + "vqnegq_s32", + "vqrdmulh_lane_s16", + "vqrdmulh_lane_s32", + "vqrdmulh_laneq_s16", + "vqrdmulh_laneq_s32", + "vqrdmulhq_lane_s16", + "vqrdmulhq_lane_s32", + "vqrdmulhq_laneq_s16", + "vqrdmulhq_laneq_s32", + "vqrdmulh_n_s16", + "vqrdmulhq_n_s16", + "vqrdmulh_n_s32", + "vqrdmulhq_n_s32", + "vqrdmulh_s16", + "vqrdmulhq_s16", + "vqrdmulh_s32", + "vqrdmulhq_s32", + "vqrshl_s8", + "vqrshlq_s8", + "vqrshl_s16", + "vqrshlq_s16", + "vqrshl_s32", + "vqrshlq_s32", + "vqrshl_s64", + "vqrshlq_s64", + "vqrshl_u8", + "vqrshlq_u8", + "vqrshl_u16", + "vqrshlq_u16", + "vqrshl_u32", + "vqrshlq_u32", + "vqrshl_u64", + "vqrshlq_u64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + "vqrshrn_n_u64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + "vqrshrn_n_u64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqshl_n_s8", + "vqshlq_n_s8", + "vqshl_n_s16", + "vqshlq_n_s16", + "vqshl_n_s32", + "vqshlq_n_s32", + "vqshl_n_s64", + "vqshlq_n_s64", + "vqshl_n_u8", + "vqshlq_n_u8", + "vqshl_n_u16", + "vqshlq_n_u16", + "vqshl_n_u32", + "vqshlq_n_u32", + "vqshl_n_u64", + "vqshlq_n_u64", + "vqshl_s8", + "vqshlq_s8", + "vqshl_s16", + "vqshlq_s16", + "vqshl_s32", + "vqshlq_s32", + 
"vqshl_s64", + "vqshlq_s64", + "vqshl_u8", + "vqshlq_u8", + "vqshl_u16", + "vqshlq_u16", + "vqshl_u32", + "vqshlq_u32", + "vqshl_u64", + "vqshlq_u64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqsub_s64", + "vqsubq_s64", + "vqsub_u64", + "vqsubq_u64", + "vraddhn_high_s16", + "vraddhn_high_s32", + "vraddhn_high_s64", + "vraddhn_high_u16", + "vraddhn_high_u32", + "vraddhn_high_u64", + "vraddhn_s16", + "vraddhn_s32", + "vraddhn_s64", + "vraddhn_u16", + "vraddhn_u16", + "vraddhn_u32", + "vraddhn_u32", + "vraddhn_u64", + "vraddhn_u64", + "vrecpe_f16", + "vrecpeq_f16", + "vrecpe_f32", + "vrecpeq_f32", + "vrecpe_u32", + "vrecpeq_u32", + "vrecps_f16", + "vrecpsq_f16", + "vrecps_f32", + "vrecpsq_f32", + "vreinterpret_f32_f16", + "vreinterpret_f32_f16", + "vreinterpret_s8_f16", + "vreinterpret_s8_f16", + "vreinterpret_s16_f16", + "vreinterpret_s16_f16", + "vreinterpret_s32_f16", + "vreinterpret_s32_f16", + "vreinterpret_s64_f16", + "vreinterpret_s64_f16", + "vreinterpret_u8_f16", + "vreinterpret_u8_f16", + "vreinterpret_u16_f16", + "vreinterpret_u16_f16", + "vreinterpret_u32_f16", + "vreinterpret_u32_f16", + "vreinterpret_u64_f16", + "vreinterpret_u64_f16", + "vreinterpret_p8_f16", + "vreinterpret_p8_f16", + "vreinterpret_p16_f16", + "vreinterpret_p16_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s16_f16", + 
"vreinterpretq_s16_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p16_f16", + "vreinterpretq_p16_f16", + "vreinterpret_f16_f32", + "vreinterpret_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpret_f16_s8", + "vreinterpret_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpret_f16_s16", + "vreinterpret_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpret_f16_s32", + "vreinterpret_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpret_f16_s64", + "vreinterpret_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpret_f16_u8", + "vreinterpret_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpret_f16_u16", + "vreinterpret_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpret_f16_u32", + "vreinterpret_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpret_f16_u64", + "vreinterpret_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpret_f16_p8", + "vreinterpret_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpret_f16_p16", + "vreinterpret_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p128", + "vreinterpretq_f16_p128", + "vreinterpret_p64_f16", + "vreinterpret_p64_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p64_f16", + "vreinterpretq_p64_f16", + "vreinterpret_f16_p64", + "vreinterpret_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f32_p128", + "vreinterpretq_f32_p128", + 
"vreinterpret_s8_f32", + "vreinterpret_s8_f32", + "vreinterpret_s16_f32", + "vreinterpret_s16_f32", + "vreinterpret_s32_f32", + "vreinterpret_s32_f32", + "vreinterpret_s64_f32", + "vreinterpret_s64_f32", + "vreinterpret_u8_f32", + "vreinterpret_u8_f32", + "vreinterpret_u16_f32", + "vreinterpret_u16_f32", + "vreinterpret_u32_f32", + "vreinterpret_u32_f32", + "vreinterpret_u64_f32", + "vreinterpret_u64_f32", + "vreinterpret_p8_f32", + "vreinterpret_p8_f32", + "vreinterpret_p16_f32", + "vreinterpret_p16_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p16_f32", + "vreinterpretq_p16_f32", + "vreinterpret_f32_s8", + "vreinterpret_f32_s8", + "vreinterpret_s16_s8", + "vreinterpret_s16_s8", + "vreinterpret_s32_s8", + "vreinterpret_s32_s8", + "vreinterpret_s64_s8", + "vreinterpret_s64_s8", + "vreinterpret_u8_s8", + "vreinterpret_u8_s8", + "vreinterpret_u16_s8", + "vreinterpret_u16_s8", + "vreinterpret_u32_s8", + "vreinterpret_u32_s8", + "vreinterpret_u64_s8", + "vreinterpret_u64_s8", + "vreinterpret_p8_s8", + "vreinterpret_p8_s8", + "vreinterpret_p16_s8", + "vreinterpret_p16_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u64_s8", + 
"vreinterpretq_u64_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p16_s8", + "vreinterpretq_p16_s8", + "vreinterpret_f32_s16", + "vreinterpret_f32_s16", + "vreinterpret_s8_s16", + "vreinterpret_s8_s16", + "vreinterpret_s32_s16", + "vreinterpret_s32_s16", + "vreinterpret_s64_s16", + "vreinterpret_s64_s16", + "vreinterpret_u8_s16", + "vreinterpret_u8_s16", + "vreinterpret_u16_s16", + "vreinterpret_u16_s16", + "vreinterpret_u32_s16", + "vreinterpret_u32_s16", + "vreinterpret_u64_s16", + "vreinterpret_u64_s16", + "vreinterpret_p8_s16", + "vreinterpret_p8_s16", + "vreinterpret_p16_s16", + "vreinterpret_p16_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p16_s16", + "vreinterpretq_p16_s16", + "vreinterpret_f32_s32", + "vreinterpret_f32_s32", + "vreinterpret_s8_s32", + "vreinterpret_s8_s32", + "vreinterpret_s16_s32", + "vreinterpret_s16_s32", + "vreinterpret_s64_s32", + "vreinterpret_s64_s32", + "vreinterpret_u8_s32", + "vreinterpret_u8_s32", + "vreinterpret_u16_s32", + "vreinterpret_u16_s32", + "vreinterpret_u32_s32", + "vreinterpret_u32_s32", + "vreinterpret_u64_s32", + "vreinterpret_u64_s32", + "vreinterpret_p8_s32", + "vreinterpret_p8_s32", + "vreinterpret_p16_s32", + "vreinterpret_p16_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u16_s32", + 
"vreinterpretq_u16_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p16_s32", + "vreinterpretq_p16_s32", + "vreinterpret_f32_s64", + "vreinterpret_f32_s64", + "vreinterpret_s8_s64", + "vreinterpret_s8_s64", + "vreinterpret_s16_s64", + "vreinterpret_s16_s64", + "vreinterpret_s32_s64", + "vreinterpret_s32_s64", + "vreinterpret_u8_s64", + "vreinterpret_u8_s64", + "vreinterpret_u16_s64", + "vreinterpret_u16_s64", + "vreinterpret_u32_s64", + "vreinterpret_u32_s64", + "vreinterpret_u64_s64", + "vreinterpret_p8_s64", + "vreinterpret_p8_s64", + "vreinterpret_p16_s64", + "vreinterpret_p16_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p16_s64", + "vreinterpretq_p16_s64", + "vreinterpret_f32_u8", + "vreinterpret_f32_u8", + "vreinterpret_s8_u8", + "vreinterpret_s8_u8", + "vreinterpret_s16_u8", + "vreinterpret_s16_u8", + "vreinterpret_s32_u8", + "vreinterpret_s32_u8", + "vreinterpret_s64_u8", + "vreinterpret_s64_u8", + "vreinterpret_u16_u8", + "vreinterpret_u16_u8", + "vreinterpret_u32_u8", + "vreinterpret_u32_u8", + "vreinterpret_u64_u8", + "vreinterpret_u64_u8", + "vreinterpret_p8_u8", + "vreinterpret_p8_u8", + "vreinterpret_p16_u8", + "vreinterpret_p16_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s64_u8", + 
"vreinterpretq_s64_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p16_u8", + "vreinterpretq_p16_u8", + "vreinterpret_f32_u16", + "vreinterpret_f32_u16", + "vreinterpret_s8_u16", + "vreinterpret_s8_u16", + "vreinterpret_s16_u16", + "vreinterpret_s16_u16", + "vreinterpret_s32_u16", + "vreinterpret_s32_u16", + "vreinterpret_s64_u16", + "vreinterpret_s64_u16", + "vreinterpret_u8_u16", + "vreinterpret_u8_u16", + "vreinterpret_u32_u16", + "vreinterpret_u32_u16", + "vreinterpret_u64_u16", + "vreinterpret_u64_u16", + "vreinterpret_p8_u16", + "vreinterpret_p8_u16", + "vreinterpret_p16_u16", + "vreinterpret_p16_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p16_u16", + "vreinterpretq_p16_u16", + "vreinterpret_f32_u32", + "vreinterpret_f32_u32", + "vreinterpret_s8_u32", + "vreinterpret_s8_u32", + "vreinterpret_s16_u32", + "vreinterpret_s16_u32", + "vreinterpret_s32_u32", + "vreinterpret_s32_u32", + "vreinterpret_s64_u32", + "vreinterpret_s64_u32", + "vreinterpret_u8_u32", + "vreinterpret_u8_u32", + "vreinterpret_u16_u32", + "vreinterpret_u16_u32", + "vreinterpret_u64_u32", + "vreinterpret_u64_u32", + "vreinterpret_p8_u32", + "vreinterpret_p8_u32", + "vreinterpret_p16_u32", + "vreinterpret_p16_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s16_u32", + 
"vreinterpretq_s16_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p16_u32", + "vreinterpretq_p16_u32", + "vreinterpret_f32_u64", + "vreinterpret_f32_u64", + "vreinterpret_s8_u64", + "vreinterpret_s8_u64", + "vreinterpret_s16_u64", + "vreinterpret_s16_u64", + "vreinterpret_s32_u64", + "vreinterpret_s32_u64", + "vreinterpret_s64_u64", + "vreinterpret_u8_u64", + "vreinterpret_u8_u64", + "vreinterpret_u16_u64", + "vreinterpret_u16_u64", + "vreinterpret_u32_u64", + "vreinterpret_u32_u64", + "vreinterpret_p8_u64", + "vreinterpret_p8_u64", + "vreinterpret_p16_u64", + "vreinterpret_p16_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p16_u64", + "vreinterpretq_p16_u64", + "vreinterpret_f32_p8", + "vreinterpret_f32_p8", + "vreinterpret_s8_p8", + "vreinterpret_s8_p8", + "vreinterpret_s16_p8", + "vreinterpret_s16_p8", + "vreinterpret_s32_p8", + "vreinterpret_s32_p8", + "vreinterpret_s64_p8", + "vreinterpret_s64_p8", + "vreinterpret_u8_p8", + "vreinterpret_u8_p8", + "vreinterpret_u16_p8", + "vreinterpret_u16_p8", + "vreinterpret_u32_p8", + "vreinterpret_u32_p8", + "vreinterpret_u64_p8", + "vreinterpret_u64_p8", + "vreinterpret_p16_p8", + "vreinterpret_p16_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_s8_p8", + 
"vreinterpretq_s8_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_p16_p8", + "vreinterpretq_p16_p8", + "vreinterpret_f32_p16", + "vreinterpret_f32_p16", + "vreinterpret_s8_p16", + "vreinterpret_s8_p16", + "vreinterpret_s16_p16", + "vreinterpret_s16_p16", + "vreinterpret_s32_p16", + "vreinterpret_s32_p16", + "vreinterpret_s64_p16", + "vreinterpret_s64_p16", + "vreinterpret_u8_p16", + "vreinterpret_u8_p16", + "vreinterpret_u16_p16", + "vreinterpret_u16_p16", + "vreinterpret_u32_p16", + "vreinterpret_u32_p16", + "vreinterpret_u64_p16", + "vreinterpret_u64_p16", + "vreinterpret_p8_p16", + "vreinterpret_p8_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u64_p16", + "vreinterpretq_u64_p16", + "vreinterpretq_p8_p16", + "vreinterpretq_p8_p16", + "vreinterpretq_s8_p128", + "vreinterpretq_s8_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_p8_p128", + "vreinterpretq_p8_p128", + 
"vreinterpretq_p16_p128", + "vreinterpretq_p16_p128", + "vreinterpretq_p64_p128", + "vreinterpretq_p64_p128", + "vreinterpret_p64_s8", + "vreinterpret_p64_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p64_s8", + "vreinterpretq_p64_s8", + "vreinterpret_p64_s16", + "vreinterpret_p64_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p64_s16", + "vreinterpretq_p64_s16", + "vreinterpret_p64_s32", + "vreinterpret_p64_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p128_s64", + "vreinterpretq_p128_s64", + "vreinterpret_p64_u8", + "vreinterpret_p64_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p64_u8", + "vreinterpretq_p64_u8", + "vreinterpret_p64_u16", + "vreinterpret_p64_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p64_u16", + "vreinterpretq_p64_u16", + "vreinterpret_p64_u32", + "vreinterpret_p64_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p128_u64", + "vreinterpretq_p128_u64", + "vreinterpret_p64_p8", + "vreinterpret_p64_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p64_p8", + "vreinterpretq_p64_p8", + "vreinterpret_p64_p16", + "vreinterpret_p64_p16", + "vreinterpretq_p128_p16", + "vreinterpretq_p128_p16", + "vreinterpretq_p64_p16", + "vreinterpretq_p64_p16", + "vreinterpret_s8_p64", + "vreinterpret_s8_p64", + "vreinterpret_s16_p64", + "vreinterpret_s16_p64", + "vreinterpret_s32_p64", + "vreinterpret_s32_p64", + "vreinterpret_u8_p64", + "vreinterpret_u8_p64", + "vreinterpret_u16_p64", + "vreinterpret_u16_p64", + "vreinterpret_u32_p64", + "vreinterpret_u32_p64", + "vreinterpret_p8_p64", + "vreinterpret_p8_p64", + "vreinterpret_p16_p64", + "vreinterpret_p16_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_s8_p64", + 
"vreinterpretq_s8_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p16_p64", + "vreinterpretq_p16_p64", + "vrev64_f16", + "vrev64q_f16", + "vrndn_f16", + "vrndnq_f16", + "vrndn_f32", + "vrndnq_f32", + "vrshl_s8", + "vrshlq_s8", + "vrshl_s16", + "vrshlq_s16", + "vrshl_s32", + "vrshlq_s32", + "vrshl_s64", + "vrshlq_s64", + "vrshl_u8", + "vrshlq_u8", + "vrshl_u16", + "vrshlq_u16", + "vrshl_u32", + "vrshlq_u32", + "vrshl_u64", + "vrshlq_u64", + "vrshr_n_s8", + "vrshrq_n_s8", + "vrshr_n_s16", + "vrshrq_n_s16", + "vrshr_n_s32", + "vrshrq_n_s32", + "vrshr_n_s64", + "vrshrq_n_s64", + "vrshr_n_u8", + "vrshrq_n_u8", + "vrshr_n_u16", + "vrshrq_n_u16", + "vrshr_n_u32", + "vrshrq_n_u32", + "vrshr_n_u64", + "vrshrq_n_u64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_u16", + "vrshrn_n_u32", + "vrshrn_n_u64", + "vrsqrte_f16", + "vrsqrteq_f16", + "vrsqrteq_f32", + "vrsqrte_u32", + "vrsqrteq_u32", + "vrsqrts_f16", + "vrsqrtsq_f16", + "vrsqrts_f32", + "vrsqrtsq_f32", + "vrsra_n_s8", + "vrsraq_n_s8", + "vrsra_n_s16", + "vrsraq_n_s16", + "vrsra_n_s32", + "vrsraq_n_s32", + "vrsra_n_s64", + "vrsraq_n_s64", + "vrsra_n_u8", + "vrsraq_n_u8", + "vrsra_n_u16", + "vrsraq_n_u16", + "vrsra_n_u32", + "vrsraq_n_u32", + "vrsra_n_u64", + "vrsraq_n_u64", + "vrsubhn_s16", + "vrsubhn_s32", + "vrsubhn_s64", + "vrsubhn_u16", + "vrsubhn_u16", + "vrsubhn_u32", + "vrsubhn_u32", + "vrsubhn_u64", + "vrsubhn_u64", + "vset_lane_f16", + "vsetq_lane_f16", + "vset_lane_f32", + "vsetq_lane_f32", + "vset_lane_s8", + "vsetq_lane_s8", + "vset_lane_s16", + "vsetq_lane_s16", + "vset_lane_s32", + "vsetq_lane_s32", + "vsetq_lane_s64", + "vset_lane_u8", + 
"vsetq_lane_u8", + "vset_lane_u16", + "vsetq_lane_u16", + "vset_lane_u32", + "vsetq_lane_u32", + "vsetq_lane_u64", + "vset_lane_p8", + "vsetq_lane_p8", + "vset_lane_p16", + "vsetq_lane_p16", + "vset_lane_p64", + "vset_lane_s64", + "vset_lane_u64", + "vsetq_lane_p64", + "vsha1cq_u32", + "vsha1h_u32", + "vsha1mq_u32", + "vsha1pq_u32", + "vsha1su0q_u32", + "vsha1su1q_u32", + "vsha256h2q_u32", + "vsha256hq_u32", + "vsha256su0q_u32", + "vsha256su1q_u32", + "vshl_n_s8", + "vshlq_n_s8", + "vshl_n_s16", + "vshlq_n_s16", + "vshl_n_s32", + "vshlq_n_s32", + "vshl_n_s64", + "vshlq_n_s64", + "vshl_n_u8", + "vshlq_n_u8", + "vshl_n_u16", + "vshlq_n_u16", + "vshl_n_u32", + "vshlq_n_u32", + "vshl_n_u64", + "vshlq_n_u64", + "vshl_s8", + "vshlq_s8", + "vshl_s16", + "vshlq_s16", + "vshl_s32", + "vshlq_s32", + "vshl_s64", + "vshlq_s64", + "vshl_u8", + "vshlq_u8", + "vshl_u16", + "vshlq_u16", + "vshl_u32", + "vshlq_u32", + "vshl_u64", + "vshlq_u64", + "vshll_n_s16", + "vshll_n_s32", + "vshll_n_s8", + "vshll_n_u16", + "vshll_n_u32", + "vshll_n_u8", + "vshr_n_s8", + "vshrq_n_s8", + "vshr_n_s16", + "vshrq_n_s16", + "vshr_n_s32", + "vshrq_n_s32", + "vshr_n_s64", + "vshrq_n_s64", + "vshr_n_u8", + "vshrq_n_u8", + "vshr_n_u16", + "vshrq_n_u16", + "vshr_n_u32", + "vshrq_n_u32", + "vshr_n_u64", + "vshrq_n_u64", + "vshrn_n_s16", + "vshrn_n_s32", + "vshrn_n_s64", + "vshrn_n_u16", + "vshrn_n_u32", + "vshrn_n_u64", + "vsra_n_s8", + "vsraq_n_s8", + "vsra_n_s16", + "vsraq_n_s16", + "vsra_n_s32", + "vsraq_n_s32", + "vsra_n_s64", + "vsraq_n_s64", + "vsra_n_u8", + "vsraq_n_u8", + "vsra_n_u16", + "vsraq_n_u16", + "vsra_n_u32", + "vsraq_n_u32", + "vsra_n_u64", + "vsraq_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x3", + 
"vst1q_f32_x3", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_lane_f16", + "vst1q_lane_f16", + "vst1_lane_f32", + "vst1q_lane_f32", + "vst1_lane_s8", + "vst1q_lane_s8", + "vst1_lane_s16", + "vst1q_lane_s16", + "vst1_lane_s32", + "vst1q_lane_s32", + "vst1q_lane_s64", + "vst1_lane_u8", + "vst1q_lane_u8", + "vst1_lane_u16", + "vst1q_lane_u16", + "vst1_lane_u32", + "vst1q_lane_u32", + "vst1q_lane_u64", + "vst1_lane_p8", + "vst1q_lane_p8", + "vst1_lane_p16", + "vst1q_lane_p16", + "vst1_lane_p64", + "vst1_lane_s64", + "vst1_lane_u64", + "vst1_p64_x2", + "vst1_p64_x3", + "vst1_p64_x4", + "vst1q_p64_x2", + "vst1q_p64_x3", + "vst1q_p64_x4", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + "vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + "vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_u8_x2", + "vst1_u8_x3", + "vst1_u8_x4", + "vst1q_u8_x2", + "vst1q_u8_x3", + "vst1q_u8_x4", + "vst1_u16_x2", + "vst1_u16_x3", + "vst1_u16_x4", + "vst1q_u16_x2", + "vst1q_u16_x3", + "vst1q_u16_x4", + "vst1_u32_x2", + "vst1_u32_x3", + "vst1_u32_x4", + "vst1q_u32_x2", + "vst1q_u32_x3", + "vst1q_u32_x4", + "vst1_u64_x2", + "vst1_u64_x3", + "vst1_u64_x4", + "vst1q_u64_x2", + "vst1q_u64_x3", + "vst1q_u64_x4", + "vst1_p8_x2", + "vst1_p8_x3", + "vst1_p8_x4", + "vst1q_p8_x2", + "vst1q_p8_x3", + 
"vst1q_p8_x4", + "vst1_p16_x2", + "vst1_p16_x3", + "vst1_p16_x4", + "vst1q_p16_x2", + "vst1q_p16_x3", + "vst1q_p16_x4", + "vst1q_lane_p64", + "vst2_f16", + "vst2q_f16", + "vst2_f16", + "vst2q_f16", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_u8", + "vst2_lane_u16", + "vst2q_lane_u16", + "vst2_lane_u32", + "vst2q_lane_u32", + "vst2_lane_p8", + "vst2_lane_p16", + "vst2q_lane_p16", + "vst2_p64", + "vst2_s64", + "vst2_s64", + "vst2_u64", + "vst2_u8", + "vst2q_u8", + "vst2_u16", + "vst2q_u16", + "vst2_u32", + "vst2q_u32", + "vst2_p8", + "vst2q_p8", + "vst2_p16", + "vst2q_p16", + "vst3_f16", + "vst3q_f16", + "vst3_f16", + "vst3q_f16", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_lane_f16", + "vst3q_lane_f16", + "vst3_lane_f16", + "vst3q_lane_f16", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_u8", + "vst3_lane_u16", + "vst3q_lane_u16", + "vst3_lane_u32", + "vst3q_lane_u32", + "vst3_lane_p8", + "vst3_lane_p16", + "vst3q_lane_p16", + "vst3_p64", + "vst3_s64", + "vst3_s64", + "vst3_u64", + "vst3_u8", + "vst3q_u8", + 
"vst3_u16", + "vst3q_u16", + "vst3_u32", + "vst3q_u32", + "vst3_p8", + "vst3q_p8", + "vst3_p16", + "vst3q_p16", + "vst4_f16", + "vst4q_f16", + "vst4_f16", + "vst4q_f16", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_u8", + "vst4_lane_u16", + "vst4q_lane_u16", + "vst4_lane_u32", + "vst4q_lane_u32", + "vst4_lane_p8", + "vst4_lane_p16", + "vst4q_lane_p16", + "vst4_p64", + "vst4_s64", + "vst4_s64", + "vst4_u64", + "vst4_u8", + "vst4q_u8", + "vst4_u16", + "vst4q_u16", + "vst4_u32", + "vst4q_u32", + "vst4_p8", + "vst4q_p8", + "vst4_p16", + "vst4q_p16", + "vsub_f16", + "vsubq_f16", + "vsub_s64", + "vsubq_s64", + "vsub_u64", + "vsubq_u64", + "vsubhn_high_s16", + "vsubhn_high_s32", + "vsubhn_high_s64", + "vsubhn_high_u16", + "vsubhn_high_u32", + "vsubhn_high_u64", + "vsubhn_s16", + "vsubhn_s32", + "vsubhn_s64", + "vsubhn_u16", + "vsubhn_u32", + "vsubhn_u64", + "vsubl_s8", + "vsubl_s16", + "vsubl_s32", + "vsubl_u8", + "vsubl_u16", + "vsubl_u32", + "vsubw_s8", + "vsubw_s16", + "vsubw_s32", + "vsubw_u8", + "vsubw_u16", + "vsubw_u32", + "vsudot_lane_s32", + "vsudot_lane_s32", + "vsudotq_lane_s32", + "vsudotq_lane_s32", + "vsudot_laneq_s32", + "vsudotq_laneq_s32", + "vtrn_f16", + "vtrnq_f16", + "vtrn_f32", + "vtrn_s32", + "vtrn_u32", + "vtrnq_f32", + "vtrn_s8", + "vtrnq_s8", + "vtrn_s16", + "vtrnq_s16", + "vtrnq_s32", + "vtrn_u8", + "vtrnq_u8", + "vtrn_u16", + "vtrnq_u16", + "vtrnq_u32", + "vtrn_p8", + "vtrnq_p8", + "vtrn_p16", + "vtrnq_p16", + 
"vtst_s8", + "vtstq_s8", + "vtst_s16", + "vtstq_s16", + "vtst_s32", + "vtstq_s32", + "vtst_p8", + "vtstq_p8", + "vtst_p16", + "vtstq_p16", + "vtst_u8", + "vtstq_u8", + "vtst_u16", + "vtstq_u16", + "vtst_u32", + "vtstq_u32", + "vusdot_lane_s32", + "vusdot_lane_s32", + "vusdotq_lane_s32", + "vusdotq_lane_s32", + "vusdot_laneq_s32", + "vusdot_laneq_s32", + "vusdotq_laneq_s32", + "vusdotq_laneq_s32", + "vusdot_s32", + "vusdotq_s32", + "vusmmlaq_s32", + "vuzp_f16", + "vuzpq_f16", + "vuzp_f32", + "vuzp_s32", + "vuzp_u32", + "vuzpq_f32", + "vuzp_s8", + "vuzpq_s8", + "vuzp_s16", + "vuzpq_s16", + "vuzpq_s32", + "vuzp_u8", + "vuzpq_u8", + "vuzp_u16", + "vuzpq_u16", + "vuzpq_u32", + "vuzp_p8", + "vuzpq_p8", + "vuzp_p16", + "vuzpq_p16", + "vzip_f16", + "vzipq_f16", + "vzip_f32", + "vzip_s32", + "vzip_u32", + "vzip_s8", + "vzip_s16", + "vzip_u8", + "vzip_u16", + "vzip_p8", + "vzip_p16", + "vzipq_f32", + "vzipq_s8", + "vzipq_s16", + "vzipq_s32", + "vzipq_u8", + "vzipq_u16", + "vzipq_u32", + "vzipq_p8", + "vzipq_p16", + "__rndr", + "__rndrrs", +]; diff --git a/intrinsics_data/arm_intrinsics.json b/intrinsics_data/arm_intrinsics.json index bce85d19a1..d442f2f1fa 100644 --- a/intrinsics_data/arm_intrinsics.json +++ b/intrinsics_data/arm_intrinsics.json @@ -224,21 +224,25 @@ ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s16]", "arguments": [ - "float16x4_t a", - "int16x4_t b" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -246,26 +250,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s32]", 
"arguments": [ - "float16x8_t a", - "int16x8_t b" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -273,26 +285,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s64]", "arguments": [ - "float32x2_t a", - "int32x2_t b" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -300,26 +320,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s8]", "arguments": [ - "float32x4_t a", - "int32x4_t b" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -327,26 +355,34 @@ ], "instructions": [ [ - "FSCALE" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f64", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u16]", "arguments": [ - "float64x2_t a", - "int64x2_t b" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "float64x2_t" + "value": 
"svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -354,228 +390,244 @@ ], "instructions": [ [ - "FSCALE" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s32", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_s8", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u8]", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t c" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + 
"op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_u16", + "SIMD_ISA": "SVE2", + "name": "svaba[_s16]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_u32", + "SIMD_ISA": "SVE2", + "name": "svaba[_s32]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaba_u8", + "SIMD_ISA": "SVE2", + "name": "svaba[_s64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": 
{ - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.8B" + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s16", + "SIMD_ISA": "SVE2", + "name": "svaba[_s8]", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -583,30 +635,34 @@ ], "instructions": [ [ - "SABAL2" + "SABA" + ], + [ + "MOVPRFX", + "SABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s32", + "SIMD_ISA": "SVE2", + "name": "svaba[_u16]", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -614,30 +670,34 @@ ], "instructions": [ [ - "SABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_s8", + "SIMD_ISA": "SVE2", + "name": "svaba[_u32]", "arguments": [ - "int16x8_t a", - "int8x16_t b", - "int8x16_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + 
"register": "Zop2.S" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -645,30 +705,34 @@ ], "instructions": [ [ - "SABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u16", + "SIMD_ISA": "SVE2", + "name": "svaba[_u64]", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -676,30 +740,34 @@ ], "instructions": [ [ - "UABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u32", + "SIMD_ISA": "SVE2", + "name": "svaba[_u8]", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t c" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -707,30 +775,34 @@ ], "instructions": [ [ - "UABAL2" + "UABA" + ], + [ + "MOVPRFX", + "UABA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_high_u8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s16]", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "uint8x16_t c" + "svint16_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, 
- "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -738,479 +810,524 @@ ], "instructions": [ [ - "UABAL2" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s32]", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "svint32_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s64]", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "svint64_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_s8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u16]", "arguments": [ - "int16x8_t a", - "int8x8_t b", - "int8x8_t c" + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": 
"Vm.8B" + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u32]", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.4H" + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u64]", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.2S" + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabal_u8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s16]", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.8B" + 
"op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABAL" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s32]", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_s64]", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_s8", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u16]", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B" } 
}, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u16", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.8H" + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u32", + "SIMD_ISA": "SVE2", + "name": "svabalb[_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.4S" + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabaq_u8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s16]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svint16_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" }, - "c": { - "register": "Vm.16B" + "op3": { + "register": "Zop3.B[*]" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABA" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s32]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svint32_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s64]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint64_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_f64", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u16]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -1218,200 +1335,244 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vabd_s16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_s32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_s8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s16]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s32]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" ], 
"return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u32", + "SIMD_ISA": "SVE2", + "name": "svabalt[_s64]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabd_u8", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdd_f64", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u32]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "float64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - 
"b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -1419,26 +1580,34 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdh_f16", + "SIMD_ISA": "SVE2", + "name": "svabalt[_u64]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -1446,26 +1615,34 @@ ], "instructions": [ [ - "FABD" + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1473,26 +1650,34 @@ ], "instructions": [ [ - "SABDL2" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1500,26 +1685,37 @@ ], 
"instructions": [ [ - "SABDL2" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -1527,26 +1723,35 @@ ], "instructions": [ [ - "SABDL2" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1554,26 +1759,34 @@ ], "instructions": [ [ - "UABDL2" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_high_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1581,26 +1794,37 @@ ], "instructions": [ [ - "UABDL2" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vabdl_high_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -1608,257 +1832,326 @@ ], "instructions": [ [ - "UABDL2" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_m", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_x", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + 
"svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABDL" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABDL" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABDL" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdl_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABDL" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_m", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_x", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_z", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + 
"register": "Pg.S" } }, "Architectures": [ @@ -1866,200 +2159,253 @@ ], "instructions": [ [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SABD" + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" ] ] 
}, { - "SIMD_ISA": "Neon", - "name": "vabdq_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabdq_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UABD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabds_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_m", "arguments": [ - "float32_t a", - "float32_t b" + 
"svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -2067,71 +2413,108 @@ ], "instructions": [ [ - "FABD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_x", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABS" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_z", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABS" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + 
"register": "Pg.D" } }, "Architectures": [ @@ -2139,72 +2522,108 @@ ], "instructions": [ [ - "FABS" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_z", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" - ] - ] + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_m", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2212,47 +2631,72 @@ ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabs_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint8_t 
op1", + "int8_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_z", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2260,95 +2704,144 @@ ], "instructions": [ [ - "ABS" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsh_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_m", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_x", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", 
"A64" ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_z", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FABS" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_m", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -2356,72 +2849,108 @@ ], "instructions": [ [ - "FABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_x", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_z", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" 
], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_m", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -2429,108 +2958,143 @@ ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vabsq_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_x", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ABS" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } 
}, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f32", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_f64", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2538,345 +3102,435 @@ ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_p16", + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_z", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vadd_p64", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_m", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_p8", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_x", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], 
"return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_s8", + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - 
"register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vadd_u8", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + 
"MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddd_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_x", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2884,26 +3538,37 @@ ], "instructions": [ [ - "ADD" + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddd_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_z", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -2911,58 +3576,70 @@ ], "instructions": [ [ - "ADD" + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddh_f16", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_x", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + 
"svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "r": { - "register": "Vd.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -2970,30 +3647,37 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_z", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H" }, - "r": { - "register": "Vd.4H" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -3001,30 +3685,35 @@ ], "instructions": [ [ - "ADDHN2" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_m", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3032,30 +3721,34 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_x", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + 
"svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S|Ztied2.S" }, - "r": { - "register": "Vd.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3063,30 +3756,37 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_z", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" }, - "r": { - "register": "Vd.4H" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3094,30 +3794,35 @@ ], "instructions": [ [ - "ADDHN2" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_high_u64", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_m", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -3125,200 +3830,240 @@ ], "instructions": [ [ - "ADDHN2" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s16", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint64_t 
op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s32", + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_s64", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u16", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + 
"register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u32", + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddhn_u64", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s16]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDHN" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s32]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3326,26 +4071,26 @@ ], "instructions": [ [ - "SADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s32", + "SIMD_ISA": "SVE2", + 
"name": "svabdlb[_n_s64]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3353,26 +4098,26 @@ ], "instructions": [ [ - "SADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u16]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -3380,26 +4125,26 @@ ], "instructions": [ [ - "SADDL2" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3407,26 +4152,26 @@ ], "instructions": [ [ - "UADDL2" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3434,26 +4179,26 @@ ], "instructions": [ [ - "UADDL2" + 
"UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_high_u8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s16]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3461,196 +4206,188 @@ ], "instructions": [ [ - "UADDL2" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDL" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDL" + "SABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u16]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "SADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u32]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u64]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "UABDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddl_u8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDL" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s32]", "arguments": [ - "int16x4_t a" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3658,22 
+4395,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s64]", "arguments": [ - "int32x2_t a" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3681,22 +4422,26 @@ ], "instructions": [ [ - "SADDLP" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u16]", "arguments": [ - "int8x8_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -3704,22 +4449,26 @@ ], "instructions": [ [ - "SADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u32]", "arguments": [ - "uint16x4_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -3727,22 +4476,26 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u64]", "arguments": [ - "uint32x2_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -3750,22 +4503,26 @@ ], "instructions": [ [ - "UADDLP" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlv_u8", + "SIMD_ISA": 
"SVE2", + "name": "svabdlt[_s16]", "arguments": [ - "uint8x8_t a" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3773,22 +4530,26 @@ ], "instructions": [ [ - "UADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s32]", "arguments": [ - "int16x8_t a" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -3796,22 +4557,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s64]", "arguments": [ - "int32x4_t a" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -3819,22 +4584,26 @@ ], "instructions": [ [ - "SADDLV" + "SABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_s8", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u16]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "int16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -3842,22 +4611,26 @@ ], "instructions": [ [ - "SADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u16", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u32]", "arguments": [ - "uint16x8_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": 
"uint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -3865,22 +4638,26 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u32", + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u64]", "arguments": [ - "uint32x4_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -3888,22 +4665,30 @@ ], "instructions": [ [ - "UADDLV" + "UABDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddlvq_u8", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_m", "arguments": [ - "uint8x16_t a" + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "uint16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -3911,83 +4696,93 @@ ], "instructions": [ [ - "UADDLV" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f16", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.8H" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADD" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f32", + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + 
"svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADD" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_f64", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.2D" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -3995,370 +4790,406 @@ ], "instructions": [ [ - "FADD" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p128", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_x", "arguments": [ - "poly128_t a", - "poly128_t b" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "poly128_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p16", + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_z", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "poly16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "EOR" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p64", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_m", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "poly64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_p8", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_x", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "poly8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "FABS" + ], + [ + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s16", + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_z", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.8H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "FABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s32", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_m", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.4S" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s64", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_x", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.2D" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_s8", + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u16", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8H" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vaddq_u32", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u64", + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.2D" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddq_u8", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADD" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_f32", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float32_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": 
"Pg.D" } }, "Architectures": [ @@ -4366,22 +5197,30 @@ ], "instructions": [ [ - "FADDP" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s16", + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4389,22 +5228,31 @@ ], "instructions": [ [ - "ADDV" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s32", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_m", "arguments": [ - "int32x2_t a" + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int32_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -4412,22 +5260,30 @@ ], "instructions": [ [ - "ADDP" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_s8", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -4435,22 +5291,30 @@ ], "instructions": [ [ - "ADDV" + "ABS" + ], + [ + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u16", + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_z", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, 
"Architectures": [ @@ -4458,22 +5322,31 @@ ], "instructions": [ [ - "ADDV" + "MOVPRFX", + "ABS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u32", + "SIMD_ISA": "SVE", + "name": "svacge[_f16]", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4481,22 +5354,30 @@ ], "instructions": [ [ - "ADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddv_u8", + "SIMD_ISA": "SVE", + "name": "svacge[_f32]", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4504,22 +5385,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_f32", + "SIMD_ISA": "SVE", + "name": "svacge[_f64]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4527,23 +5416,30 @@ ], "instructions": [ [ - "FADDP", - "FADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_f64", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f16]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": 
"Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4551,22 +5447,30 @@ ], "instructions": [ [ - "FADDP" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s16", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f32]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4574,22 +5478,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s32", + "SIMD_ISA": "SVE", + "name": "svacge[_n_f64]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4597,22 +5509,30 @@ ], "instructions": [ [ - "ADDV" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s64", + "SIMD_ISA": "SVE", + "name": "svacgt[_f16]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4620,22 +5540,30 @@ ], "instructions": [ [ - "ADDP" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_s8", + "SIMD_ISA": "SVE", + "name": "svacgt[_f32]", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4643,22 +5571,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u16", + "SIMD_ISA": "SVE", + "name": "svacgt[_f64]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4666,22 +5602,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u32", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f16]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4689,22 +5633,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u64", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f32]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4712,22 +5664,30 @@ ], "instructions": [ [ - "ADDP" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddvq_u8", + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f64]", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t 
op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4735,26 +5695,30 @@ ], "instructions": [ [ - "ADDV" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s16", + "SIMD_ISA": "SVE", + "name": "svacle[_f16]", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4762,26 +5726,30 @@ ], "instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s32", + "SIMD_ISA": "SVE", + "name": "svacle[_f32]", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4789,26 +5757,30 @@ ], "instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_s8", + "SIMD_ISA": "SVE", + "name": "svacle[_f64]", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4816,26 +5788,30 @@ ], 
"instructions": [ [ - "SADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u16", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f16]", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -4843,26 +5819,30 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u32", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f32]", "arguments": [ - "uint64x2_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -4870,26 +5850,30 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_high_u8", + "SIMD_ISA": "SVE", + "name": "svacle[_n_f64]", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -4897,1518 +5881,1906 @@ ], "instructions": [ [ - "UADDW2" + "FACGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s16", + "SIMD_ISA": "SVE", + "name": "svaclt[_f16]", "arguments": [ - "int32x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - 
"value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s32", + "SIMD_ISA": "SVE", + "name": "svaclt[_f32]", "arguments": [ - "int64x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_s8", + "SIMD_ISA": "SVE", + "name": "svaclt[_f64]", "arguments": [ - "int16x8_t a", - "int8x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u16", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f16]", "arguments": [ - "uint32x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ 
- "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u32", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f32]", "arguments": [ - "uint64x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaddw_u8", + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f64]", "arguments": [ - "uint16x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDW" + "FACGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesdq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_m", "arguments": [ - "uint8x16_t data", - "uint8x16_t key" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "key": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESD" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaeseq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_x", "arguments": [ - "uint8x16_t data", - "uint8x16_t key" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - 
"value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "key": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESE" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesimcq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_z", "arguments": [ - "uint8x16_t data" + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "data": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESIMC" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vaesmcq_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_m", "arguments": [ - "uint8x16_t data" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "data": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "AESMC" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_m", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_s8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_x", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_z", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + 
"svbool_t pg", + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "SADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_m", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_x", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vand_u8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": 
"Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s64", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_z", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", 
+ "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_s8", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_m", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u16", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u32", + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "MOVPRFX", + "UADALP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u64", + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u32]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint32_t op1", + "svuint32_t 
op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vandq_u8", + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u64]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "AND" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s16", + "SIMD_ISA": "SVE2", + "name": "svadclb[_u32]", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s32", + "SIMD_ISA": "SVE2", + "name": "svadclb[_u64]", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s64", + "SIMD_ISA": "SVE2", + "name": "svadclt[_n_u32]", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S[*]" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_s8", + "SIMD_ISA": "SVE2", + "name": "svadclt[_n_u64]", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D[*]" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u16", + "SIMD_ISA": "SVE2", + "name": "svadclt[_u32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.S" + } }, "Architectures": [ "A64" 
], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u32", + "SIMD_ISA": "SVE2", + "name": "svadclt[_u64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" }, - "b": {}, - "c": {} + "op3": { + "register": "Zop3.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_m", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbcaxq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_x", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "BCAX" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s16", + "SIMD_ISA": 
"SVE", + "name": "svadd[_f16]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], 
"return_type": { - "value": "int8x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u32", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + 
"register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbic_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H[*]" + }, + 
"pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_x", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" 
} }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vbicq_u32", + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_x", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbicq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": 
"Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BIC" + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_m", "arguments": [ - "uint16x4_t a", - "float16x4_t b", - "float16x4_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_x", "arguments": [ - "uint32x2_t a", - "float32x2_t b", - "float32x2_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_z", "arguments": [ - "uint64x1_t a", - "float64x1_t b", - "float64x1_t c" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": 
"Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -6416,458 +7788,526 @@ ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_m", "arguments": [ - "uint16x4_t a", - "poly16x4_t b", - "poly16x4_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_x", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "poly64x1_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_p8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_z", "arguments": [ - "uint8x8_t a", - "poly8x8_t b", - "poly8x8_t c" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S[*]" }, - 
"c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_m", "arguments": [ - "uint16x4_t a", - "int16x4_t b", - "int16x4_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_x", "arguments": [ - "uint32x2_t a", - "int32x2_t b", - "int32x2_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_z", "arguments": [ - "uint64x1_t a", - "int64x1_t b", - "int64x1_t c" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - 
"register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_m", "arguments": [ - "uint8x8_t a", - "int8x8_t b", - "int8x8_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_x", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_z", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.8B" + "pg": 
{ + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_m", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", - "uint64x1_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbsl_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_x", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" }, - "c": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_z", "arguments": [ - "uint16x8_t a", - "float16x8_t b", - "float16x8_t c" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H[*]" }, - "c": { - "register": "Vm.16B" + 
"pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_m", "arguments": [ - "uint32x4_t a", - "float32x4_t b", - "float32x4_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_x", "arguments": [ - "uint64x2_t a", - "float64x2_t b", - "float64x2_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -6875,556 +8315,665 @@ ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_z", "arguments": [ - "uint16x8_t a", - "poly16x8_t b", - "poly16x8_t c" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S[*]" }, - "c": { - 
"register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_m", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", - "poly64x2_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_p8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_x", "arguments": [ - "uint8x16_t a", - "poly8x16_t b", - "poly8x16_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s16", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_z", "arguments": [ - "uint16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" }, - "c": { 
- "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s32", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_m", "arguments": [ - "uint32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s64", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_x", "arguments": [ - "uint64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_s8", + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_z", "arguments": [ - "uint8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" }, - "c": { - 
"register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u16", + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u32", + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_x", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H|Ztied2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u64", + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.H" }, - "c": { - "register": "Vm.16B" + "pg": { + 
"register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vbslq_u8", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" }, - "c": { - "register": "Vm.16B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "BSL" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_x", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H " + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S " + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + 
"MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_m", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H " + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcadd_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_x", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S " + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H " + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_m", "arguments": [ - "float32x4_t a", - "float32x4_t 
b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S " + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot270_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D " + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7432,82 +8981,107 @@ ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H " + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_m", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.4S " + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaddq_rot90_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D " + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -7515,83 +9089,107 @@ ], "instructions": [ [ - "FCADD" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcage_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcage_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcage_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -7599,26 +9197,36 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaged_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_z", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -7626,26 +9234,35 @@ ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageh_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -7653,83 +9270,107 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f16", + "SIMD_ISA": "SVE", + 
"name": "svadd[_u64]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcageq_f64", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7737,26 +9378,34 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcages_f32", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_x", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32_t" + "value": 
"svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -7764,83 +9413,103 @@ ], "instructions": [ [ - "FACGE" + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f16", + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f32", + "SIMD_ISA": "SVE", + "name": "svadda[_f16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "float16_t initial", + "svfloat16_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "initial": { + "register": "Htied" }, - "b": { - "register": "Vm.2S" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagt_f64", + "SIMD_ISA": "SVE", + "name": "svadda[_f32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "float32_t initial", + "svfloat32_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "initial": { + "register": "Stied" }, - "b": { - "register": "Dm" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, 
"Architectures": [ @@ -7848,26 +9517,30 @@ ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtd_f64", + "SIMD_ISA": "SVE", + "name": "svadda[_f64]", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "float64_t initial", + "svfloat64_t op" ], "return_type": { - "value": "uint64_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "initial": { + "register": "Dtied" }, - "b": { - "register": "Dm" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -7875,26 +9548,26 @@ ], "instructions": [ [ - "FACGT" + "FADDA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagth_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -7902,83 +9575,80 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + 
"register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagtq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -7986,26 +9656,26 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcagts_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u32]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8013,83 +9683,80 @@ ], "instructions": [ [ - "FACGT" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcale_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -8097,26 +9764,26 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaled_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s64]", "arguments": [ - "float64_t a", - "float64_t b" + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -8124,26 +9791,26 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u16]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8151,83 +9818,84 @@ ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": 
"uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGE" + "ADDHNB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaleq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svint8_t even", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Vm.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8235,26 +9903,30 @@ ], "instructions": [ [ - "FACGE" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcales_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s32]", "arguments": [ - "float32_t a", - "float32_t b" + "svint16_t even", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Sm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8262,83 +9934,92 @@ ], "instructions": [ [ - "FACGE" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcalt_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svint32_t even", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalt_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalt_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8346,26 +10027,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u64]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Dn" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -8373,26 +10058,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalth_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint8_t even", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Hm" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8400,83 +10089,92 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint16_t even", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Vm.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcaltq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint32_t even", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcaltq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "even": { + "register": "Ztied.B" }, - "b": { - "register": "Vm.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8484,26 +10182,30 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcalts_f32", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u32]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "even": { + "register": "Ztied.H" }, - "b": { - "register": "Sm" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -8511,83 +10213,84 @@ ], "instructions": [ [ - "FACGT" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "even": { + "register": "Ztied.S" }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "ADDHNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s16]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s32]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8595,141 +10298,134 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s64]", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u16]", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s16]", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -8737,113 +10433,107 @@ ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": 
"uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u16]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u32]", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -8851,55 +10541,53 @@ ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s16]", "arguments": [ - "float64_t a", - "float64_t b" + "svint8_t op1", + "int8_t 
op2" ], "return_type": { - "value": "uint64_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -8907,26 +10595,26 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s32]", "arguments": [ - "int64_t a", - "int64_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -8934,26 +10622,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqd_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s64]", "arguments": [ - "uint64_t a", - "uint64_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -8961,26 +10649,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -8988,83 +10676,80 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + 
"svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "SADDLBT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -9072,141 +10757,134 @@ ], "instructions": [ [ - "FCMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s32]", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s64]", 
"arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u16]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u32]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u64]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -9214,113 +10892,107 @@ ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vceqq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s16]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s32]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s64]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "SADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u16]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -9328,55 +11000,53 @@ ], "instructions": [ [ - "CMEQ" + "UADDLT" 
] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u32]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqs_f32", + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u64]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -9384,46 +11054,65 @@ ], "instructions": [ [ - "FCMEQ" + "UADDLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_m", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { 
+ "register": "Pg.H" } }, "Architectures": [ @@ -9431,22 +11120,34 @@ ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9454,46 +11155,69 @@ ], "instructions": [ [ - "FCMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_p64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_x", "arguments": [ - "poly64x1_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_p8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_m", "arguments": [ - "poly8x8_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9501,22 +11225,34 @@ ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svfloat64_t op1", + 
"svfloat64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9524,22 +11260,34 @@ ], "instructions": [ [ - "CMEQ" + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9547,22 +11295,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_x", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9570,22 +11330,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_s8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_m", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9593,22 +11365,34 @@ ], "instructions": [ 
[ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_x", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9616,22 +11400,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_m", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9639,22 +11435,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_x", "arguments": [ - "uint64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9662,22 +11470,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqz_u8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_m", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { 
- "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9685,22 +11505,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9708,22 +11540,34 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_s64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u16]_m", "arguments": [ - "int64_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9731,22 +11575,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzd_u64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u16]_x", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9754,22 +11610,34 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzh_f16", + "SIMD_ISA": 
"SVE2", + "name": "svaddp[_u32]_m", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9777,46 +11645,69 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u32]_x", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_m", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9824,22 +11715,34 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9847,46 +11750,69 @@ ], "instructions": [ [ - "FCMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_p64", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_m", "arguments": [ - "poly64x2_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_p8", + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_x", "arguments": [ - "poly8x16_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -9894,22 +11820,30 @@ ], "instructions": [ [ - "CMEQ" + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s16", + "SIMD_ISA": "SVE", + "name": "svaddv[_f16]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9917,22 +11851,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s32", + "SIMD_ISA": "SVE", + "name": "svaddv[_f32]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": 
{ - "register": "Vn.4S" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -9940,22 +11878,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s64", + "SIMD_ISA": "SVE", + "name": "svaddv[_f64]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -9963,22 +11905,26 @@ ], "instructions": [ [ - "CMEQ" + "FADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_s8", + "SIMD_ISA": "SVE", + "name": "svaddv[_s16]", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -9986,22 +11932,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u16", + "SIMD_ISA": "SVE", + "name": "svaddv[_s32]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -10009,22 +11959,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u32", + "SIMD_ISA": "SVE", + "name": "svaddv[_s64]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -10032,22 +11986,26 @@ ], "instructions": [ [ - "CMEQ" + "UADDV" 
] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u64", + "SIMD_ISA": "SVE", + "name": "svaddv[_s8]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -10055,22 +12013,26 @@ ], "instructions": [ [ - "CMEQ" + "SADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzq_u8", + "SIMD_ISA": "SVE", + "name": "svaddv[_u16]", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -10078,22 +12040,26 @@ ], "instructions": [ [ - "CMEQ" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vceqzs_f32", + "SIMD_ISA": "SVE", + "name": "svaddv[_u32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint32_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -10101,83 +12067,80 @@ ], "instructions": [ [ - "FCMEQ" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f16", + "SIMD_ISA": "SVE", + "name": "svaddv[_u64]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.4H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f32", + "SIMD_ISA": "SVE", + "name": "svaddv[_u8]", "arguments": [ - "float32x2_t a", - "float32x2_t 
b" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "UADDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s16]", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svint16_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10185,84 +12148,80 @@ ], "instructions": [ [ - "FCMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s16", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s32]", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svint32_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s32", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s64]", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svint64_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u16]", "arguments": 
[ - "int64x1_t a", - "int64x1_t b" + "svuint16_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10270,113 +12229,107 @@ ], "instructions": [ [ - "CMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_s8", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svuint32_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u16", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint64_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u32", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s16]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u64", + "SIMD_ISA": 
"SVE2", + "name": "svaddwb[_s32]", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -10384,55 +12337,53 @@ ], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcge_u8", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s64]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "SADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcged_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u16]", "arguments": [ - "float64_t a", - "float64_t b" + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -10440,26 +12391,26 @@ ], "instructions": [ [ - "FCMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcged_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u32]", "arguments": [ - "int64_t a", - "int64_t b" + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -10467,26 +12418,26 @@ ], "instructions": [ [ - "CMGE" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcged_u64", + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u64]", "arguments": [ - "uint64_t a", - "uint64_t b" + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -10494,26 +12445,26 @@ ], "instructions": [ [ - "CMHS" + "UADDWB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeh_f16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s16]", "arguments": [ - "float16_t a", - "float16_t b" + "svint16_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10521,83 +12472,80 @@ ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_f16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svint32_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_f32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svint64_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "SADDWT" ] ] }, 
{ - "SIMD_ISA": "Neon", - "name": "vcgeq_f64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint16_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -10605,84 +12553,80 @@ ], "instructions": [ [ - "FCMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u32]", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svuint32_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u64]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svuint64_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s16]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svint16_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -10690,113 
+12634,107 @@ ], "instructions": [ [ - "CMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_s8", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s32]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint32_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u16", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s64]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svint64_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "SADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u32", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u16]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint16_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u64", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u32]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint32_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + 
"register": "Zop2.H" } }, "Architectures": [ @@ -10804,55 +12742,53 @@ ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgeq_u8", + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u64]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint64_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "UADDWT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcges_f32", + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[s32]offset", "arguments": [ - "float32_t a", - "float32_t b" + "svuint32_t bases", + "svint32_t offsets" ], "return_type": { - "value": "uint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "bases": { + "register": "Zbases.S" }, - "b": { - "register": "Sm" + "offsets": { + "register": "Zoffsets.S" } }, "Architectures": [ @@ -10860,46 +12796,53 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f16", + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[u32]offset", "arguments": [ - "float16x4_t a" + "svuint32_t bases", + "svuint32_t offsets" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "bases": { + "register": "Zbases.S" + }, + "offsets": { + "register": "Zoffsets.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f32", + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[s64]offset", "arguments": [ - "float32x2_t a" + "svuint64_t bases", + "svint64_t offsets" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "bases": { + 
"register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" } }, "Architectures": [ @@ -10907,22 +12850,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_f64", + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[u64]offset", "arguments": [ - "float64x1_t a" + "svuint64_t bases", + "svuint64_t offsets" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" } }, "Architectures": [ @@ -10930,22 +12877,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s16", + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[s32]index", "arguments": [ - "int16x4_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -10953,22 +12904,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s32", + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[u32]index", "arguments": [ - "int32x2_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -10976,22 +12931,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s64", + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[s64]index", "arguments": [ - "int64x1_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": 
"Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -10999,22 +12958,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgez_s8", + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[u64]index", "arguments": [ - "int8x8_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11022,22 +12985,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezd_f64", + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[s32]index", "arguments": [ - "float64_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11045,22 +13012,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezd_s64", + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[u32]index", "arguments": [ - "int64_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11068,22 +13039,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezh_f16", + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[s64]index", "arguments": [ - "float16_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "bases": { + "register": "Zbases.D" + }, + "indices": { + 
"register": "Zindices.D" } }, "Architectures": [ @@ -11091,46 +13066,53 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f16", + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[u64]index", "arguments": [ - "float16x8_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f32", + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[s32]index", "arguments": [ - "float32x4_t a" + "svuint32_t bases", + "svint32_t indices" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11138,22 +13120,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_f64", + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[u32]index", "arguments": [ - "float64x2_t a" + "svuint32_t bases", + "svuint32_t indices" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" } }, "Architectures": [ @@ -11161,22 +13147,26 @@ ], "instructions": [ [ - "FCMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s16", + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[s64]index", "arguments": [ - "int16x8_t a" + "svuint64_t bases", + "svint64_t indices" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": 
"Zindices.D" } }, "Architectures": [ @@ -11184,22 +13174,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s32", + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[u64]index", "arguments": [ - "int32x4_t a" + "svuint64_t bases", + "svuint64_t indices" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" } }, "Architectures": [ @@ -11207,22 +13201,26 @@ ], "instructions": [ [ - "CMGE" + "ADR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s64", + "SIMD_ISA": "SVE2", + "name": "svaesd[_u8]", "arguments": [ - "int64x2_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" } }, "Architectures": [ @@ -11230,22 +13228,29 @@ ], "instructions": [ [ - "CMGE" + "AESD" + ], + [ + "AESD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezq_s8", + "SIMD_ISA": "SVE2", + "name": "svaese[_u8]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" } }, "Architectures": [ @@ -11253,22 +13258,25 @@ ], "instructions": [ [ - "CMGE" + "AESE" + ], + [ + "AESE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgezs_f32", + "SIMD_ISA": "SVE2", + "name": "svaesimc[_u8]", "arguments": [ - "float32_t a" + "svuint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Ztied.B" } }, "Architectures": [ @@ -11276,83 +13284,84 @@ ], "instructions": [ [ - "FCMGE" + "AESIMC" 
] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f16", + "SIMD_ISA": "SVE2", + "name": "svaesmc[_u8]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svuint8_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "op": { + "register": "Ztied.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AESMC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f32", + "SIMD_ISA": "SVE", + "name": "svand[_b]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Pop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_m", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -11360,84 +13369,113 @@ ], "instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s16", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + 
"op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s32", + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_m", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -11445,113 +13483,160 @@ ], "instructions": [ [ - "CMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_s8", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_x", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u16", + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_z", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u32", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_m", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_x", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -11559,55 +13644,87 @@ ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + 
"AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgt_u8", + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_z", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_m", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11615,26 +13732,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_x", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11642,26 +13767,39 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtd_u64", + "SIMD_ISA": "SVE", + "name": 
"svand[_n_s8]_z", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11669,26 +13807,35 @@ ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgth_f16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_m", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -11696,83 +13843,113 @@ ], "instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_x", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_z", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_f64", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_m", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -11780,84 +13957,120 @@ ], "instructions": [ [ - "FCMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "b": { - 
"register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s64", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -11865,113 +14078,162 @@ ], "instructions": [ [ - "CMGT" + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_s8", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_x", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u16", + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": 
{ + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u32", + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u64", + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_x", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -11979,55 +14241,75 @@ ], "instructions": [ [ - "CMHI" + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtq_u8", + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgts_f32", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_m", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12035,46 +14317,71 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_x", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f32", + "SIMD_ISA": "SVE", + "name": "svand[_s16]_z", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12082,22 +14389,35 @@ ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_f64", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_m", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], 
"return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12105,22 +14425,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s16", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_x", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12128,22 +14460,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s32", + "SIMD_ISA": "SVE", + "name": "svand[_s32]_z", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12151,22 +14497,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s64", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_m", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12174,22 +14533,34 @@ ], "instructions": [ [ - "CMGT" + 
"AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtz_s8", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12197,22 +14568,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzd_f64", + "SIMD_ISA": "SVE", + "name": "svand[_s64]_z", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12220,22 +14605,35 @@ ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzd_s64", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_m", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12243,22 +14641,34 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzh_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Hn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12266,46 +14676,72 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f16", + "SIMD_ISA": "SVE", + "name": "svand[_s8]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u16]_m", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12313,22 +14749,34 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_f64", + "SIMD_ISA": "SVE", + "name": "svand[_u16]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12336,22 +14784,36 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s16", + 
"SIMD_ISA": "SVE", + "name": "svand[_u16]_z", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12359,22 +14821,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s32", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_m", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12382,22 +14857,34 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s64", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_x", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12405,22 +14892,36 @@ ], "instructions": [ [ - "CMGT" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzq_s8", + "SIMD_ISA": "SVE", + "name": "svand[_u32]_z", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { 
+ "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12428,22 +14929,35 @@ ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcgtzs_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_m", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12451,83 +14965,107 @@ ], "instructions": [ [ - "FCMGT" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_f16", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_x", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_f32", + "SIMD_ISA": "SVE", + "name": "svand[_u64]_z", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" - ] - ] + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcle_f64", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_m", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12535,84 +15073,103 @@ ], "instructions": [ [ - "FCMGE" + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s16", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_x", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "AND" + ], + [ + "AND" + ], + [ + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s32", + "SIMD_ISA": "SVE", + "name": "svand[_u8]_z", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s64", + "SIMD_ISA": "SVE", + "name": "svandv[_s16]", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": 
"uint64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12620,113 +15177,107 @@ ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_s8", + "SIMD_ISA": "SVE", + "name": "svandv[_s32]", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u16", + "SIMD_ISA": "SVE", + "name": "svandv[_s64]", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.4H" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u32", + "SIMD_ISA": "SVE", + "name": "svandv[_s8]", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u64", + "SIMD_ISA": "SVE", + "name": "svandv[_u16]", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64x1_t" + "value": 
"uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12734,55 +15285,53 @@ ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcle_u8", + "SIMD_ISA": "SVE", + "name": "svandv[_u32]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_f64", + "SIMD_ISA": "SVE", + "name": "svandv[_u64]", "arguments": [ - "float64_t a", - "float64_t b" + "svbool_t pg", + "svuint64_t op" ], "return_type": { "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -12790,26 +15339,26 @@ ], "instructions": [ [ - "FCMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_s64", + "SIMD_ISA": "SVE", + "name": "svandv[_u8]", "arguments": [ - "int64_t a", - "int64_t b" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint64_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Dm" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -12817,26 +15366,30 @@ ], "instructions": [ [ - "CMGE" + "ANDV" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcled_u64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_m", "arguments": [ - "uint64_t a", - "uint64_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint16_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Dn" + "op1": { + "register": "Ztied1.H" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12844,26 +15397,33 @@ ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleh_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_x", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -12871,83 +15431,114 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_z", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_m", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S[*]" + }, + 
"pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_x", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -12955,84 +15546,114 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_z", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_m", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "ASR" + ], + [ + 
"ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_x", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13040,113 +15661,155 @@ ], "instructions": [ [ - "CMGE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_s8", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_z", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u16", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_m", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Ztied1.B" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u32", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_x", "arguments": [ - 
"uint32x4_t a", - "uint32x4_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u64", + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_z", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13154,55 +15817,74 @@ ], "instructions": [ [ - "CMHS" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcleq_u8", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_m", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcles_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_x", "arguments": [ - "float32_t a", - "float32_t b" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": 
"uint32_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13210,46 +15892,73 @@ ], "instructions": [ [ - "FCMGE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_z", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f32", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_m", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13257,22 +15966,34 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_x", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13280,22 +16001,37 @@ ], 
"instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s16", + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13303,22 +16039,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s32", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13326,22 +16075,34 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_x", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13349,22 +16110,37 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclez_s8", + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_z", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": 
"uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -13372,22 +16148,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezd_f64", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_m", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13395,22 +16184,34 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezd_s64", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_x", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13418,22 +16219,37 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezh_f16", + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13441,46 +16257,69 @@ ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", 
+ "ASRR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_m", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_x", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13488,22 +16327,36 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_f64", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_z", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -13511,22 +16364,35 @@ ], "instructions": [ [ - "FCMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_m", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.8H" + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13534,22 +16400,33 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_x", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13557,22 +16434,36 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s64", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_z", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -13580,22 +16471,35 @@ ], "instructions": [ [ - "CMLE" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezq_s8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_m", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13603,22 +16507,33 @@ ], "instructions": [ [ - "CMLE" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclezs_f32", + "SIMD_ISA": "SVE", + "name": 
"svasr_wide[_n_s8]_x", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -13626,383 +16541,517 @@ ], "instructions": [ [ - "FCMLE" + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_z", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_m", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_s8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_x", "arguments": [ - "int8x8_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { 
+ "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_z", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_m", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcls_u8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_x", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_z", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": 
"svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s32", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_m", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_s8", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_x", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASR" + ], + [ + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u16", + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_z", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "MOVPRFX", + "ASR" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u32", + "SIMD_ISA": "SVE", + "name": 
"svasrd[_n_s16]_m", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclsq_u8", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_x", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLS" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f16", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_z", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 16 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_m", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + 
"maximum": 32 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_f64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_x", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 32 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -14010,84 +17059,104 @@ ], "instructions": [ [ - "FCMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s16", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 32 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": 
[ [ - "CMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -14095,113 +17164,140 @@ ], "instructions": [ [ - "CMGT" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_s8", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint8x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "imm2": { + "minimum": 1, + "maximum": 64 }, - "b": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u16", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u32", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t 
b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u64", + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" ], "return_type": { - "value": "uint64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm2": { + "minimum": 1, + "maximum": 8 }, - "b": { - "register": "Dm" + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -14209,55 +17305,66 @@ ], "instructions": [ [ - "CMHI" + "MOVPRFX", + "ASRD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclt_u8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s16]", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_f64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s32]", "arguments": [ - "float64_t a", - "float64_t b" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": 
"Zop1.S|Ztied1.S" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -14265,26 +17372,34 @@ ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_s64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s64]", "arguments": [ - "int64_t a", - "int64_t b" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -14292,26 +17407,34 @@ ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltd_u64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s8]", "arguments": [ - "uint64_t a", - "uint64_t b" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint64_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -14319,26 +17442,34 @@ ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclth_f16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u16]", "arguments": [ - "float16_t a", - "float16_t b" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -14346,83 +17477,104 @@ ], "instructions": [ [ - 
"FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_f64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u8]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -14430,84 +17582,104 @@ ], "instructions": [ [ - "FCMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s16]", "arguments": 
[ - "int16x8_t a", - "int16x8_t b" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s32]", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s64]", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -14515,113 +17687,139 @@ ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_s8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_s8]", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - 
"a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMGT" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u16", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u16]", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u32", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u32]", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u64", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u64]", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": 
"Zop3.D" } }, "Architectures": [ @@ -14629,55 +17827,65 @@ ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltq_u8", + "SIMD_ISA": "SVE2", + "name": "svbcax[_u8]", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMHI" + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclts_f32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u16]", "arguments": [ - "float32_t a", - "float32_t b" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Sm" + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -14685,46 +17893,53 @@ ], "instructions": [ [ - "FCMGT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f16", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u32]", "arguments": [ - "float16x4_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u64]", "arguments": [ - "float32x2_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": 
"Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -14732,22 +17947,26 @@ ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_f64", + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u8]", "arguments": [ - "float64x1_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ @@ -14755,22 +17974,26 @@ ], "instructions": [ [ - "FCMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s16", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u16]", "arguments": [ - "int16x4_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -14778,22 +18001,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s32", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u32]", "arguments": [ - "int32x2_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -14801,22 +18028,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s64", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u64]", "arguments": [ - "int64x1_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -14824,22 +18055,26 @@ ], 
"instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltz_s8", + "SIMD_ISA": "SVE2", + "name": "svbdep[_u8]", "arguments": [ - "int8x8_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -14847,22 +18082,26 @@ ], "instructions": [ [ - "CMLT" + "BDEP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzd_f64", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u16]", "arguments": [ - "float64_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -14870,22 +18109,26 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzd_s64", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u32]", "arguments": [ - "int64_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -14893,22 +18136,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzh_f16", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u64]", "arguments": [ - "float16_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -14916,46 +18163,53 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f16", + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u8]", 
"arguments": [ - "float16x8_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f32", + "SIMD_ISA": "SVE2", + "name": "svbext[_u16]", "arguments": [ - "float32x4_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -14963,22 +18217,26 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_f64", + "SIMD_ISA": "SVE2", + "name": "svbext[_u32]", "arguments": [ - "float64x2_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -14986,22 +18244,26 @@ ], "instructions": [ [ - "FCMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s16", + "SIMD_ISA": "SVE2", + "name": "svbext[_u64]", "arguments": [ - "int16x8_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -15009,22 +18271,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s32", + "SIMD_ISA": "SVE2", + "name": "svbext[_u8]", "arguments": [ - "int32x4_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint8_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -15032,22 +18298,26 @@ ], "instructions": [ [ - "CMLT" + "BEXT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s64", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u16]", "arguments": [ - "int64x2_t a" + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" } }, "Architectures": [ @@ -15055,22 +18325,26 @@ ], "instructions": [ [ - "CMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzq_s8", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u32]", "arguments": [ - "int8x16_t a" + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" } }, "Architectures": [ @@ -15078,22 +18352,26 @@ ], "instructions": [ [ - "CMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcltzs_f32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u64]", "arguments": [ - "float32_t a" + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" } }, "Architectures": [ @@ -15101,1138 +18379,1299 @@ ], "instructions": [ [ - "FCMLT" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s16", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u8]", "arguments": [ - "int16x4_t a" + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" 
} }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u16]", "arguments": [ - "int32x2_t a" + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_s8", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u32]", "arguments": [ - "int8x8_t a" + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u16", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u64]", "arguments": [ - "uint16x4_t a" + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u32", + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u8]", "arguments": [ - "uint32x2_t a" + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BGRP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclz_u8", + "SIMD_ISA": "SVE", + 
"name": "svbic[_b]_z", "arguments": [ - "uint8x8_t a" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_m", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_x", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_s8", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_z", "arguments": [ - "int8x16_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "CLZ" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_m", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_x", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vclzq_u8", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_z", "arguments": [ - "uint8x16_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CLZ" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, 
"Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + 
"register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.2S" + "op2": 
{ + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot180_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + 
"register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "AND" + ], + [ + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot270_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_m", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, 
"Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_x", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_z", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_m", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + 
"BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_x", "arguments": [ - "float16x4_t r", - "float16x4_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.4H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmla_rot90_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_z", "arguments": [ - "float32x2_t r", - "float32x2_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcmlaq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_z", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -16240,214 +19679,233 @@ ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - 
"float32x2_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_x", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + 
"svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_m", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -16455,214 +19913,236 @@ ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_x", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint32_t" }, 
"Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.S" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_m", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot180_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_x", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": 
"svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f32", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_f64", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_x", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { 
+ "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -16670,214 +20150,212 @@ ], "instructions": [ [ - "FCMLA" + "BIC" + ], + [ + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "MOVPRFX", + "BIC" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_lane_f32", + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "inactive": { + "register": "Ptied.B" }, - "r": { - "register": "Vd.4S" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Pop.B" }, - "r": { - "register": "Vd.8H" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", 
"A64" ], "instructions": [ [ - "FCMLA" + "BRKA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot270_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_m", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "inactive": { + "register": "Ptied.B" }, - "r": { - "register": "Vd.4S" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f16", + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svbool_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.8H" + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f32", + "SIMD_ISA": "SVE", + "name": "svbrkn[_b]_z", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.4S" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Ptied2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKN" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_f64", + "SIMD_ISA": "SVE", + "name": "svbrkpa[_b]_z", "arguments": [ - "float64x2_t r", - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svbool_t op1", + 
"svbool_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "r": { - "register": "Vd.2D" + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -16885,368 +20363,446 @@ ], "instructions": [ [ - "FCMLA" + "BRKPA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_lane_f16", + "SIMD_ISA": "SVE", + "name": "svbrkpb[_b]_z", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x4_t b", - "const int lane" + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Pop1.B" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BRKPB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_lane_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s16]", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x2_t b", - "const int lane" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_laneq_f16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s32]", "arguments": [ - "float16x8_t r", - "float16x8_t a", - "float16x8_t b", - "const int lane" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": 
"float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "r": { - "register": "Vd.8H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcmlaq_rot90_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s64]", "arguments": [ - "float32x4_t r", - "float32x4_t a", - "float32x4_t b", - "const int lane" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "r": { - "register": "Vd.4S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCMLA" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s8]", "arguments": [ - "poly8x8_t a" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u16]", "arguments": [ - "int8x8_t a" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcnt_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u32]", "arguments": [ - "uint8x8_t a" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u64]", "arguments": [ - "poly8x16_t a" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u8]", "arguments": [ - "int8x16_t a" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcntq_u8", + "SIMD_ISA": "SVE2", + "name": 
"svbsl1n[_s16]", "arguments": [ - "uint8x16_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CNT" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s32]", "arguments": [ - "float16x4_t low", - "float16x4_t high" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s64]", "arguments": [ - "float32x2_t low", - "float32x2_t high" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "float32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s8]", "arguments": [ - "float64x1_t low", - "float64x1_t high" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "float64x2_t" 
+ "value": "svint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -17254,366 +20810,419 @@ ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u16]", "arguments": [ - "poly16x4_t low", - "poly16x4_t high" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u32]", "arguments": [ - "poly64x1_t low", - "poly64x1_t high" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "poly64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u64]", "arguments": [ - "poly8x8_t low", - "poly8x8_t high" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "poly8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + 
"register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u8]", "arguments": [ - "int16x4_t low", - "int16x4_t high" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s16]", "arguments": [ - "int32x2_t low", - "int32x2_t high" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "int32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s32]", "arguments": [ - "int64x1_t low", - "int64x1_t high" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "int64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + 
"register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s64]", "arguments": [ - "int8x8_t low", - "int8x8_t high" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "int8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s8]", "arguments": [ - "uint16x4_t low", - "uint16x4_t high" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.4H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "low": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u16]", "arguments": [ - "uint32x2_t low", - "uint32x2_t high" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "low": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" 
+ "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u32]", "arguments": [ - "uint64x1_t low", - "uint64x1_t high" + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "low": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcombine_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u64]", "arguments": [ - "uint8x8_t low", - "uint8x8_t high" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "high": { - "register": "Vm.8B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "low": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP", - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u8]", "arguments": [ - "float32x2_t a", - "const int lane1", - "float32x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -17621,36 +21230,34 @@ ], 
"instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s16]", "arguments": [ - "float64x1_t a", - "const int lane1", - "float64x1_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -17658,36 +21265,34 @@ ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s32]", "arguments": [ - "poly16x4_t a", - "const int lane1", - "poly16x4_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "poly16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -17695,74 +21300,69 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s64]", "arguments": [ - "poly64x1_t a", - "const int lane1", - "poly64x1_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "poly64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.1D" + "op1": 
{ + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s8]", "arguments": [ - "poly8x8_t a", - "const int lane1", - "poly8x8_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -17770,36 +21370,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u16]", "arguments": [ - "int16x4_t a", - "const int lane1", - "int16x4_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -17807,36 +21405,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s32", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u32]", "arguments": [ - "int32x2_t a", - "const int lane1", - "int32x2_t b", - "const int lane2" + 
"svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -17844,36 +21440,34 @@ ], "instructions": [ [ - "INS" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u64]", "arguments": [ - "int64x1_t a", - "const int lane1", - "int64x1_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "int64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.1D" - }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -17881,36 +21475,34 @@ ], "instructions": [ [ - "DUP" + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u8]", "arguments": [ - "int8x8_t a", - "const int lane1", - "int8x8_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -17918,36 +21510,34 @@ ], "instructions": [ [ - "INS" + 
"BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s16]", "arguments": [ - "uint16x4_t a", - "const int lane1", - "uint16x4_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "uint16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -17955,36 +21545,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s32]", "arguments": [ - "uint32x2_t a", - "const int lane1", - "uint32x2_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "int32_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -17992,36 +21580,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s64]", "arguments": [ - "uint64x1_t a", - "const int lane1", - "uint64x1_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "int64_t op3" ], "return_type": { - "value": "uint64x1_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.1D" - }, - 
"lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -18029,36 +21615,34 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_lane_u8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s8]", "arguments": [ - "uint8x8_t a", - "const int lane1", - "uint8x8_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "int8_t op3" ], "return_type": { - "value": "uint8x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ @@ -18066,36 +21650,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u16]", "arguments": [ - "float32x2_t a", - "const int lane1", - "float32x4_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" ], "return_type": { - "value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vn.4S" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -18103,36 +21685,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_f64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u32]", "arguments": [ - "float64x1_t a", - "const int lane1", - "float64x2_t b", - "const int lane2" + "svuint32_t op1", + 
"svuint32_t op2", + "uint32_t op3" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.S[*]" } }, "Architectures": [ @@ -18140,36 +21720,34 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u64]", "arguments": [ - "poly16x4_t a", - "const int lane1", - "poly16x8_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.8H" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.D[*]" } }, "Architectures": [ @@ -18177,74 +21755,69 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u8]", "arguments": [ - "poly64x1_t a", - "const int lane1", - "poly64x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B[*]" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + 
"MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_p8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s16]", "arguments": [ - "poly8x8_t a", - "const int lane1", - "poly8x16_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" ], "return_type": { - "value": "poly8x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -18252,36 +21825,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s32]", "arguments": [ - "int16x4_t a", - "const int lane1", - "int16x8_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" ], "return_type": { - "value": "int16x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -18289,36 +21860,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s64]", "arguments": [ - "int32x2_t a", - "const int lane1", - "int32x4_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" ], "return_type": { - "value": "int32x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - 
"maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -18326,36 +21895,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_s8]", "arguments": [ - "int64x1_t a", - "const int lane1", - "int64x2_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" ], "return_type": { - "value": "int64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -18363,36 +21930,34 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_s8", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u16]", "arguments": [ - "int8x8_t a", - "const int lane1", - "int8x16_t b", - "const int lane2" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -18400,36 +21965,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u16", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u32]", "arguments": [ - "uint16x4_t a", - "const int lane1", - "uint16x8_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" ], 
"return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -18437,36 +22000,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u32", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u64]", "arguments": [ - "uint32x2_t a", - "const int lane1", - "uint32x4_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -18474,36 +22035,34 @@ ], "instructions": [ [ - "INS" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopy_laneq_u64", + "SIMD_ISA": "SVE2", + "name": "svbsl[_u8]", "arguments": [ - "uint64x1_t a", - "const int lane1", - "uint64x2_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "UNUSED" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -18511,36 +22070,35 @@ ], "instructions": [ [ - "DUP" + "BSL" + ], + [ + "MOVPRFX", + "BSL" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcopy_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_m", "arguments": [ - "uint8x8_t a", - "const int lane1", - "uint8x16_t b", - "const int lane2" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18548,36 +22106,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_x", "arguments": [ - "float32x4_t a", - "const int lane1", - "float32x2_t b", - "const int lane2" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18585,36 +22142,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_z", "arguments": [ - "float64x2_t a", - "const int lane1", - "float64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { 
+ "register": "Zop1.H" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -18622,36 +22178,32 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_m", "arguments": [ - "poly16x8_t a", - "const int lane1", - "poly16x4_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly16x8_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -18659,74 +22211,71 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_x", "arguments": [ - "poly64x2_t a", - "const int lane1", - "poly64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_z", "arguments": [ - "poly8x16_t a", - "const int lane1", - 
"poly8x8_t b", - "const int lane2" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly8x16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.S" }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op2": { + "register": "Zop2.S" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -18734,36 +22283,32 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_m", "arguments": [ - "int16x8_t a", - "const int lane1", - "int16x4_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -18771,36 +22316,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_x", "arguments": [ - "int32x4_t a", - "const int lane1", - "int32x2_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "pg": { + 
"register": "Pg.D" } }, "Architectures": [ @@ -18808,36 +22352,35 @@ ], "instructions": [ [ - "INS" + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_z", "arguments": [ - "int64x2_t a", - "const int lane1", - "int64x1_t b", - "const int lane2" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" + "op1": { + "register": "Zop1.D" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -18845,36 +22388,28 @@ ], "instructions": [ [ - "INS" + "MOVPRFX", + "FCADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_s8", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s16]", "arguments": [ - "int8x16_t a", - "const int lane1", - "int8x8_t b", - "const int lane2" + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "int8x16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -18882,36 +22417,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u16", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s32]", "arguments": [ - "uint16x8_t a", - "const int lane1", - "uint16x4_t b", - "const int lane2" + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" 
- }, - "b": { - "register": "Vn.4H" - }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -18919,36 +22449,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u32", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s64]", "arguments": [ - "uint32x4_t a", - "const int lane1", - "uint32x2_t b", - "const int lane2" + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.2S" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -18956,36 +22481,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u64", + "SIMD_ISA": "SVE2", + "name": "svcadd[_s8]", "arguments": [ - "uint64x2_t a", - "const int lane1", - "uint64x1_t b", - "const int lane2" + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.1D" - }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane2": { - "minimum": 0, - "maximum": 0 + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -18993,36 +22513,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_lane_u8", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u16]", "arguments": [ - "uint8x16_t a", - "const int lane1", - "uint8x8_t b", - "const int lane2" + "svuint16_t op1", + 
"svuint16_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.8B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.H" } }, "Architectures": [ @@ -19030,36 +22545,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_f32", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u32]", "arguments": [ - "float32x4_t a", - "const int lane1", - "float32x4_t b", - "const int lane2" + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S" } }, "Architectures": [ @@ -19067,36 +22577,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_f64", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u64]", "arguments": [ - "float64x2_t a", - "const int lane1", - "float64x2_t b", - "const int lane2" + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "float64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "b": { - "register": "Vn.2D" - }, - "lane1": { - "minimum": 0, - "maximum": 1 - }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D" } }, "Architectures": [ @@ -19104,36 +22609,31 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcopyq_laneq_p16", + "SIMD_ISA": "SVE2", + "name": "svcadd[_u8]", "arguments": [ - "poly16x8_t a", - "const int lane1", - "poly16x8_t b", - "const int lane2" + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B|Ztied1.B" }, - "lane1": { - "minimum": 0, - "maximum": 7 - }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" } }, "Architectures": [ @@ -19141,74 +22641,71 @@ ], "instructions": [ [ - "INS" + "CADD" + ], + [ + "MOVPRFX", + "CADD" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_p64", + "SIMD_ISA": "SVE2", + "name": "svcdot[_s32]", "arguments": [ - "poly64x2_t a", - "const int lane1", - "poly64x2_t b", - "const int lane2" + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_p8", + "SIMD_ISA": "SVE2", + "name": "svcdot[_s64]", "arguments": [ - "poly8x16_t a", - "const int lane1", - "poly8x16_t b", - "const int lane2" + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "poly8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 15 + "op2": 
{ + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -19216,36 +22713,40 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s16", + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s32]", "arguments": [ - "int16x8_t a", - "const int lane1", - "int16x8_t b", - "const int lane2" + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" + "imm_index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "op2": { + "register": "Zop2.B" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -19253,36 +22754,40 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s32", + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s64]", "arguments": [ - "int32x4_t a", - "const int lane1", - "int32x4_t b", - "const int lane2" + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" + "imm_index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -19290,36 +22795,34 @@ ], "instructions": [ [ - "INS" + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s64", + "SIMD_ISA": 
"SVE2", + "name": "svclamp[_f16]", "arguments": [ - "int64x2_t a", - "const int lane1", - "int64x2_t b", - "const int lane2" + "svfloat16_t op", + "svfloat16_t min", + "svfloat16_t max" ], "return_type": { - "value": "int64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "max": { + "register": "Zreg3.H" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "min": { + "register": "Zreg2.H" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zreg1.H" } }, "Architectures": [ @@ -19327,36 +22830,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_s8", + "SIMD_ISA": "SVE2", + "name": "svclamp[_f32]", "arguments": [ - "int8x16_t a", - "const int lane1", - "int8x16_t b", - "const int lane2" + "svfloat32_t op", + "svfloat32_t min", + "svfloat32_t max" ], "return_type": { - "value": "int8x16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "max": { + "register": "Zreg3.S" }, - "b": { - "register": "Vn.16B" - }, - "lane1": { - "minimum": 0, - "maximum": 15 + "min": { + "register": "Zreg2.S" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19364,36 +22861,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_f64]", "arguments": [ - "uint16x8_t a", - "const int lane1", - "uint16x8_t b", - "const int lane2" + "svfloat64_t op", + "svfloat64_t min", + "svfloat64_t max" ], "return_type": { - "value": "uint16x8_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "max": { + "register": "Zreg3.D" }, - "lane1": { - "minimum": 0, - "maximum": 7 + "min": { + "register": "Zreg2.D" }, - "lane2": { - "minimum": 0, - "maximum": 7 + "op": { + "register": "Zreg1.D" } }, 
"Architectures": [ @@ -19401,36 +22892,30 @@ ], "instructions": [ [ - "INS" + "FCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u32", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s16]", "arguments": [ - "uint32x4_t a", - "const int lane1", - "uint32x4_t b", - "const int lane2" + "svint16_t op", + "svint16_t min", + "svint16_t max" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "max": { + "register": "Zreg3.H" }, - "lane1": { - "minimum": 0, - "maximum": 3 + "min": { + "register": "Zreg2.H" }, - "lane2": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zreg1.H" } }, "Architectures": [ @@ -19438,36 +22923,30 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s32]", "arguments": [ - "uint64x2_t a", - "const int lane1", - "uint64x2_t b", - "const int lane2" + "svint32_t op", + "svint32_t min", + "svint32_t max" ], "return_type": { - "value": "uint64x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "max": { + "register": "Zreg3.S" }, - "lane1": { - "minimum": 0, - "maximum": 1 + "min": { + "register": "Zreg2.S" }, - "lane2": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19475,36 +22954,30 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcopyq_laneq_u8", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s64]", "arguments": [ - "uint8x16_t a", - "const int lane1", - "uint8x16_t b", - "const int lane2" + "svint64_t op", + "svint64_t min", + "svint64_t max" ], "return_type": { - "value": "uint8x16_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.16B" + "max": { + "register": "Zreg3.D" }, - "b": { - "register": "Vn.16B" - }, - "lane1": { - "minimum": 0, - 
"maximum": 15 + "min": { + "register": "Zreg2.D" }, - "lane2": { - "minimum": 0, - "maximum": 15 + "op": { + "register": "Zreg1.D" } }, "Architectures": [ @@ -19512,72 +22985,92 @@ ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_s8]", "arguments": [ - "uint64_t a" + "svint8_t op", + "svint8_t min", + "svint8_t max" ], "return_type": { - "value": "float16x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "SCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f32", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u16]", "arguments": [ - "uint64_t a" + "svuint16_t op", + "svuint16_t min", + "svuint16_t max" ], "return_type": { - "value": "float32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.H" + }, + "min": { + "register": "Zreg2.H" + }, + "op": { + "register": "Zreg1.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_f64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u32]", "arguments": [ - "uint64_t a" + "svuint32_t op", + "svuint32_t min", + "svuint32_t max" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.S" + }, + "min": { + "register": "Zreg2.S" + }, + "op": { + "register": "Zreg1.S" } }, "Architectures": [ @@ -19585,394 +23078,537 @@ ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p16", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u64]", "arguments": [ - "uint64_t a" + "svuint64_t op", + "svuint64_t min", + "svuint64_t max" ], "return_type": 
{ - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.D" + }, + "min": { + "register": "Zreg2.D" + }, + "op": { + "register": "Zreg1.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p64", + "SIMD_ISA": "SVE2", + "name": "svclamp[_u8]", "arguments": [ - "uint64_t a" + "svuint8_t op", + "svuint8_t min", + "svuint8_t max" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "UCLAMP" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_p8", + "SIMD_ISA": "SVE", + "name": "svclasta[_f16]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "poly8x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s16", + "SIMD_ISA": "SVE", + "name": "svclasta[_f32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "int16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcreate_s32", + "SIMD_ISA": "SVE", + "name": "svclasta[_f64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f16]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "int64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_s8", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "int8x8_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + 
"register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u32", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s16]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int16_t fallback", + "svint16_t data" ], "return_type": { - "value": "uint32x2_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s32]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int32_t fallback", + "svint32_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcreate_u8", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s64]", "arguments": [ - "uint64_t a" + "svbool_t pg", + "int64_t fallback", + "svint64_t data" ], "return_type": { - "value": "uint8x8_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Xn" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s8]", "arguments": [ - "float32x4_t a" + 
"svbool_t pg", + "int8_t fallback", + "svint8_t data" ], "return_type": { - "value": "float16x4_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTN" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u16]", "arguments": [ - "int16x4_t a" + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "float16x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u32]", "arguments": [ - "uint16x4_t a" + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "float16x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_f16", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "float32x4_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "FCVTL" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_f64", + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u8]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "float32x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -19980,72 +23616,103 @@ ], "instructions": [ [ - "FCVTN" + "CLASTA" + ], + [ + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_s32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s16]", "arguments": [ - "int32x2_t a" + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" ], "return_type": { - "value": "float32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f32_u32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s32]", "arguments": [ - "uint32x2_t a" + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" ], "return_type": { - "value": "float32x2_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_s64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" ], 
"return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20053,22 +23720,34 @@ ], "instructions": [ [ - "FCVTL" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclasta[_s8]", "arguments": [ - "int64x1_t a" + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" ], "return_type": { - "value": "float64x1_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20076,22 +23755,34 @@ ], "instructions": [ [ - "SCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclasta[_u16]", "arguments": [ - "uint64x1_t a" + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "float64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20099,26 +23790,34 @@ ], "instructions": [ [ - "UCVTF" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f16_f32", + "SIMD_ISA": "SVE", + "name": "svclasta[_u32]", "arguments": [ - "float16x4_t r", - "float32x4_t a" + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "float16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.S" }, - "r": { - "register": "Vd.4H" + "fallback": { + "register": 
"Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20126,22 +23825,34 @@ ], "instructions": [ [ - "FCVTN2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f32_f16", + "SIMD_ISA": "SVE", + "name": "svclasta[_u64]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "float32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20149,26 +23860,34 @@ ], "instructions": [ [ - "FCVTL2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f32_f64", + "SIMD_ISA": "SVE", + "name": "svclasta[_u8]", "arguments": [ - "float32x2_t r", - "float64x2_t a" + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "float32x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "data": { + "register": "Zdata.B" }, - "r": { - "register": "Vd.2S" + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20176,22 +23895,34 @@ ], "instructions": [ [ - "FCVTN2" + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_high_f64_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_f16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20199,145 +23930,172 @@ ], "instructions": [ [ - "FCVTL2" + "CLASTB" + ], + [ + 
"MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclastb[_f32]", "arguments": [ - "int16x4_t a", - "const int n" + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "float16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclastb[_f64]", "arguments": [ - "uint16x4_t a", - "const int n" + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "float16x4_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f16]", "arguments": [ - "int32x2_t a", - "const int n" + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" ], "return_type": { - "value": "float32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f32_u32", + "SIMD_ISA": "SVE", + 
"name": "svclastb[_n_f32]", "arguments": [ - "uint32x2_t a", - "const int n" + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" ], "return_type": { - "value": "float32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f64]", "arguments": [ - "int64x1_t a", - "const int n" + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" ], "return_type": { - "value": "float64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20345,27 +24103,33 @@ ], "instructions": [ [ - "SCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s16]", "arguments": [ - "uint64x1_t a", - "const int n" + "svbool_t pg", + "int16_t fallback", + "svint16_t data" ], "return_type": { - "value": "float64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20373,86 +24137,101 @@ ], "instructions": [ [ - "UCVTF" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s32]", "arguments": [ - "float16x4_t a", - "const int n" + "svbool_t pg", + "int32_t fallback", + "svint32_t data" ], 
"return_type": { - "value": "int16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s64]", "arguments": [ - "float32x2_t a", - "const int n" + "svbool_t pg", + "int64_t fallback", + "svint64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "int64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s8]", "arguments": [ - "float64x1_t a", - "const int n" + "svbool_t pg", + "int8_t fallback", + "svint8_t data" ], "return_type": { - "value": "int64x1_t" + "value": "int8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20460,86 +24239,101 @@ ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u16]", "arguments": [ - "float16x4_t a", - "const int n" + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.H" }, 
- "n": { - "minimum": 1, - "maximum": 16 + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u32]", "arguments": [ - "float32x2_t a", - "const int n" + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_n_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u64]", "arguments": [ - "float64x1_t a", - "const int n" + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.D" }, - "n": { - "minimum": 1, - "maximum": 64 + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20547,71 +24341,102 @@ ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u8]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "int16x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + 
[ + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_s16]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_s32]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" ], "return_type": { - "value": "int64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20619,71 +24444,104 @@ ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_s64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" ], "return_type": { - "value": "uint16x4_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_s8]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" ], "return_type": { - "value": "uint32x2_t" + "value": 
"svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvt_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_u16]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20691,70 +24549,104 @@ ], "instructions": [ [ - "FCVTZU" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_s16_f16", + "SIMD_ISA": "SVE", + "name": "svclastb[_u32]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" ], "return_type": { - "value": "int16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclastb[_u64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" ], "return_type": { - "value": "int32x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], 
"instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclastb[_u8]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" ], "return_type": { - "value": "int64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -20762,70 +24654,96 @@ ], "instructions": [ [ - "FCVTAS" + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_m", "arguments": [ - "float16x4_t a" + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_x", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvta_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_z", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Dn" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -20833,22 +24751,31 @@ ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtad_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_m", "arguments": [ - "float64_t a" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20856,22 +24783,30 @@ ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtad_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20879,22 +24814,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -20902,46 +24845,62 @@ ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_m", "arguments": [ - "float16_t a" + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint64_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20949,22 +24908,30 @@ ], "instructions": [ [ - "FCVTAS" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_z", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -20972,46 +24939,62 @@ ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_m", "arguments": [ - "float16_t a" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtah_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_x", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - 
"value": "uint64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21019,70 +25002,93 @@ ], "instructions": [ [ - "FCVTAU" + "CLS" + ], + [ + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_m", "arguments": [ - "float32x4_t a" + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21090,70 +25096,93 @@ ], "instructions": [ [ - "FCVTAS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_z", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op" ], 
"return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_m", "arguments": [ - "float32x4_t a" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtaq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_x", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21161,22 +25190,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtas_s32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_z", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21184,22 +25221,31 @@ ], "instructions": [ [ - "FCVTAS" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtas_u32_f32", + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_m", "arguments": [ - "float32_t a" + "svuint64_t inactive", + "svbool_t 
pg", + "svint64_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21207,22 +25253,30 @@ ], "instructions": [ [ - "FCVTAU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_x", "arguments": [ - "int64_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21230,22 +25284,30 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_z", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21253,27 +25315,31 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_m", "arguments": [ - "int64_t a", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21281,27 +25347,30 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vcvtd_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_x", "arguments": [ - "uint64_t a", - "const int n" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "float64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21309,27 +25378,30 @@ ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_z", "arguments": [ - "float64_t a", - "const int n" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21337,27 +25409,31 @@ ], "instructions": [ [ - "FCVTZS" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_n_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_m", "arguments": [ - "float64_t a", - "const int n" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21365,22 +25441,30 @@ ], "instructions": [ [ - "FCVTZU" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_x", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": 
"Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21388,22 +25472,30 @@ ], "instructions": [ [ - "FCVTZS" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_z", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21411,22 +25503,31 @@ ], "instructions": [ [ - "FCVTZU" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_m", "arguments": [ - "int16_t a" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21434,46 +25535,61 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s32", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_x", "arguments": [ - "int32_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_z", "arguments": [ - "int64_t a" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + 
"register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21481,22 +25597,31 @@ ], "instructions": [ [ - "SCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u16", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_m", "arguments": [ - "uint16_t a" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21504,46 +25629,61 @@ ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u32", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_x", "arguments": [ - "uint32_t a" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_f16_u64", + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_z", "arguments": [ - "uint64_t a" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21551,27 +25691,31 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_m", "arguments": [ - "int16_t a", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Hn" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21579,56 +25723,61 @@ ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s32", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_x", "arguments": [ - "int32_t a", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_s64", + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_z", "arguments": [ - "int64_t a", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 1, - "maximum": 16 + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -21636,27 +25785,36 @@ ], "instructions": [ [ - "SCVTF" + "MOVPRFX", + "CLZ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_m", "arguments": [ - "uint16_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21664,56 +25822,79 @@ ], 
"instructions": [ [ - "UCVTF" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u32", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_x", "arguments": [ - "uint32_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_f16_u64", + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_z", "arguments": [ - "uint64_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "float16_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -21721,27 +25902,36 @@ ], "instructions": [ [ - "UCVTF" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_m", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + 
}, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21749,56 +25939,79 @@ ], "instructions": [ [ - "FCVTZS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_x", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_z", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -21806,27 +26019,36 @@ ], "instructions": [ [ - "FCVTZS" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_m", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "n": { - "minimum": 1, - 
"maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21834,56 +26056,79 @@ ], "instructions": [ [ - "FCVTZU" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_x", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_n_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_z", "arguments": [ - "float16_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": "svfloat64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -21891,22 +26136,32 @@ ], "instructions": [ [ - "FCVTZU" + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s16]", "arguments": [ - "float16_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -21914,46 +26169,71 @@ ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s32_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s32]", "arguments": [ - "float16_t a" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_s64_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s64]", "arguments": [ - "float16_t a" + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ @@ -21961,22 +26241,35 @@ ], "instructions": [ [ - "FCVTZS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_u16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_s8]", "arguments": [ - "float16_t a" + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ @@ -21984,46 +26277,71 @@ ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcvth_u32_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u16]", "arguments": [ - "float16_t a" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvth_u64_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u32]", "arguments": [ - "float16_t a" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -22031,70 +26349,112 @@ ], "instructions": [ [ - "FCVTZU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s16_f16", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u64]", "arguments": [ - "float16x4_t a" + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s32_f32", + "SIMD_ISA": "SVE2", + "name": "svcmla[_u8]", "arguments": [ - "float32x2_t a" + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3", + "uint64_t imm_rotation" ], "return_type": { - "value": "int32x2_t" + 
"value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f16]", "arguments": [ - "float64x1_t a" + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64x1_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -22102,70 +26462,122 @@ ], "instructions": [ [ - "FCVTMS" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f32]", "arguments": [ - "float16x4_t a" + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint16x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u32_f32", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s16]", "arguments": [ - "float32x2_t a" + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint32x2_t" + "value": "svint16_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtm_u64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s32]", "arguments": [ - "float64x1_t a" + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64x1_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -22173,22 +26585,40 @@ ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmd_s64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u16]", "arguments": [ - "float64_t a" + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "int64_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" } }, "Architectures": [ @@ -22196,22 +26626,40 @@ ], "instructions": [ [ - "FCVTMS" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmd_u64_f64", + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u32]", "arguments": [ - "float64_t a" + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" ], "return_type": { - "value": "uint64_t" + "value": 
"svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" } }, "Architectures": [ @@ -22219,22 +26667,34 @@ ], "instructions": [ [ - "FCVTMU" + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22242,46 +26702,61 @@ ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22289,22 +26764,30 @@ ], "instructions": [ [ - "FCVTMS" + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u16_f16", + "SIMD_ISA": "SVE", + 
"name": "svcmpeq[_n_f16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22312,46 +26795,67 @@ ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmh_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22359,70 +26863,101 @@ ], "instructions": [ [ - "FCVTMU" + "FCMEQ" + ], + [ + "FCMEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - 
"A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22430,70 +26965,101 @@ ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s8]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtmq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u32]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -22501,22 +27067,33 @@ ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtms_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u64]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22524,22 +27101,33 @@ ], "instructions": [ [ - "FCVTMS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtms_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u8]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22547,70 +27135,95 @@ ], "instructions": [ [ - "FCVTMU" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s16]", "arguments": [ - "float16x4_t a" 
+ "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s32]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s64]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22618,70 +27231,92 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s8]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u32_f32", + "SIMD_ISA": "SVE", + "name": 
"svcmpeq[_u16]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtn_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u32]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -22689,22 +27324,30 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u64]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22712,22 +27355,30 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u8]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22735,22 +27386,30 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - 
"SIMD_ISA": "Neon", - "name": "vcvtnh_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22758,46 +27417,67 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22805,22 +27485,33 @@ ], "instructions": [ [ - "FCVTNS" + "CMPEQ" + ], + [ + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + 
}, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -22828,46 +27519,61 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnh_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -22875,70 +27581,92 @@ ], "instructions": [ [ - "FCVTNU" + "CMPEQ" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" 
+ "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_f64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -22946,70 +27674,98 @@ ], "instructions": [ [ - "FCVTNS" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtnq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], 
"return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -23017,22 +27773,33 @@ ], "instructions": [ [ - "FCVTNU" + "FCMGE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtns_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s16]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23040,22 +27807,33 @@ ], "instructions": [ [ - "FCVTNS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtns_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23063,70 +27841,101 @@ ], "instructions": [ [ - "FCVTNU" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s64]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vcvtp_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s8]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u16]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23134,70 +27943,101 @@ ], "instructions": [ [ - "FCVTPS" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u32]", "arguments": [ - "float16x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u64]", "arguments": [ - "float32x2_t a" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + 
"register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtp_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u8]", "arguments": [ - "float64x1_t a" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23205,22 +28045,33 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpd_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s16]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23228,22 +28079,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpd_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s32]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23251,22 +28110,30 @@ ], "instructions": [ [ - "FCVTPU" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -23274,46 +28141,61 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_s8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_s64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u16]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23321,22 +28203,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u32]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23344,46 +28234,61 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u32_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u64]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint32_t" + 
"value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtph_u64_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge[_u8]", "arguments": [ - "float16_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23391,70 +28296,98 @@ ], "instructions": [ [ - "FCVTPU" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s8]", "arguments": [ - 
"float64x2_t a" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23462,70 +28395,101 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtpq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u8]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -23533,22 +28497,33 @@ ], 
"instructions": [ [ - "FCVTPU" + "CMPHS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtps_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s16]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23556,22 +28531,30 @@ ], "instructions": [ [ - "FCVTPS" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtps_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23579,120 +28562,154 @@ ], "instructions": [ [ - "FCVTPU" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f16_s16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s8]", "arguments": [ - "int16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u16]", "arguments": [ - "uint16x8_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + 
"register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u32]", "arguments": [ - "int32x4_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u8]", "arguments": [ - "uint32x4_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f64_s64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f16]", "arguments": [ - "int64x2_t a" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23700,22 +28717,30 @@ ], "instructions": [ [ - "SCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_f64_u64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f32]", "arguments": [ - "uint64x2_t a" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23723,145 +28748,163 @@ ], "instructions": [ [ - "UCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f16_s16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f64]", "arguments": [ - "int16x8_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SCVTF" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f16_u16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f16]", "arguments": [ - "uint16x8_t a", - "const int n" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f32]", "arguments": [ - "int32x4_t a", - "const int n" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - 
"SCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f64]", "arguments": [ - "uint32x4_t a", - "const int n" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UCVTF" + "FCMGT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f64_s64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s16]", "arguments": [ - "int64x2_t a", - "const int n" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23869,27 +28912,33 @@ ], "instructions": [ [ - "SCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_f64_u64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s32]", "arguments": [ - "uint64x2_t a", - "const int n" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -23897,86 +28946,101 @@ ], "instructions": [ [ - "UCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s64]", "arguments": [ - "float16x8_t a", - "const 
int n" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s8]", "arguments": [ - "float32x4_t a", - "const int n" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u16]", "arguments": [ - "float64x2_t a", - "const int n" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -23984,86 +29048,101 @@ ], "instructions": [ [ - "FCVTZS" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u32]", "arguments": [ - "float16x8_t a", - "const int n" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + 
"register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 16 + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u64]", "arguments": [ - "float32x4_t a", - "const int n" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_n_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u8]", "arguments": [ - "float64x2_t a", - "const int n" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 64 + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24071,71 +29150,95 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_s16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s16]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": 
"vcvtq_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s32]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_s64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s64]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24143,71 +29246,92 @@ ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u16_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s8]", "arguments": [ - "float16x8_t a" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FCVTZS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u16]", "arguments": [ - "float32x4_t a" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "FCVTZU" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtq_u64_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u32]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24215,22 +29339,30 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u64]", "arguments": [ - "int32_t a" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24238,22 +29370,30 @@ ], "instructions": [ [ - "SCVTF" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u8]", "arguments": [ - "uint32_t a" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24261,27 +29401,30 @@ ], "instructions": [ [ - "UCVTF" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_f32_s32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s16]", "arguments": [ - "int32_t a", - "const int n" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - 
"maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24289,27 +29432,33 @@ ], "instructions": [ [ - "SCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_f32_u32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s32]", "arguments": [ - "uint32_t a", - "const int n" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24317,27 +29466,33 @@ ], "instructions": [ [ - "UCVTF" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s8]", "arguments": [ - "float32_t a", - "const int n" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24345,27 +29500,33 @@ ], "instructions": [ [ - "FCVTZS" + "CMPGT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_n_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u16]", "arguments": [ - "float32_t a", - "const int n" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.H" }, - "n": { - "minimum": 1, - "maximum": 32 + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24373,22 +29534,33 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" 
] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_s32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u32]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24396,22 +29568,33 @@ ], "instructions": [ [ - "FCVTZS" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvts_u32_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u8]", "arguments": [ - "float32_t a" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24419,22 +29602,33 @@ ], "instructions": [ [ - "FCVTZU" + "CMPHI" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtx_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s16]", "arguments": [ - "float64x2_t a" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24442,26 +29636,30 @@ ], "instructions": [ [ - "FCVTXN" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtx_high_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s32]", "arguments": [ - "float32x2_t r", - "float64x2_t a" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S" 
}, - "r": { - "register": "Vd.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24469,22 +29667,30 @@ ], "instructions": [ [ - "FCVTXN2" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vcvtxd_f32_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s8]", "arguments": [ - "float64_t a" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float32_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24492,26 +29698,30 @@ ], "instructions": [ [ - "FCVTXN" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f16", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u16]", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24519,26 +29729,30 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f32", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u32]", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24546,26 +29760,30 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdiv_f64", + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u8]", "arguments": [ - "float64x1_t 
a", - "float64x1_t b" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Dm" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -24573,54 +29791,61 @@ ], "instructions": [ [ - "FDIV" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivh_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_f16]", "arguments": [ - "float16_t a", - "float16_t b" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Hm" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_f32]", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24628,26 +29853,30 @@ ], "instructions": [ [ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f32", + "SIMD_ISA": "SVE", + "name": "svcmple[_f64]", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.D" + }, + 
"pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -24655,26 +29884,30 @@ ], "instructions": [ [ - "FDIV" + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdivq_f64", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f16]", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24682,109 +29915,101 @@ ], "instructions": [ [ - "FDIV" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f32]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x8_t b", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f64]", "arguments": [ - "uint32x2_t r", - "uint8x8_t a", - "uint8x8_t b", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": 
"Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "FCMLE" + ], + [ + "FCMGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s16]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x16_t b", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.H[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -24792,35 +30017,33 @@ ], "instructions": [ [ - "SDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s32]", "arguments": [ - "uint32x2_t r", - "uint8x8_t a", - "uint8x16_t b", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -24828,173 +30051,169 @@ ], "instructions": [ [ - "UDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s64]", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.D" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.2S" + 
"pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdot_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s8]", "arguments": [ - "uint32x2_t r", - "uint8x8_t a", - "uint8x8_t b" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op1": { + "register": "Zop1.B" }, - "b": { - "register": "Vm.8B" + "op2": { + "register": "Zop2.B[*]" }, - "r": { - "register": "Vd.2S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPLE" + ], + [ + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u16]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x8_t b", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.H" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.H[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u32]", "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x8_t b", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "op2": { + "register": "Zop2.S[*]" }, - "r": { - "register": 
"Vd.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u64]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.D" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.D[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25002,35 +30221,33 @@ ], "instructions": [ [ - "SDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u8]", "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.4B" + "op1": { + "register": "Zop1.B" }, - "lane": { - "minimum": 0, - "maximum": 3 + "op2": { + "register": "Zop2.B[*]" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25038,151 +30255,157 @@ ], "instructions": [ [ - "UDOT" + "CMPLS" + ], + [ + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_s32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s16]", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": 
"Zop2.H" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SDOT" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdotq_u32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s32]", "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op1": { + "register": "Zop1.S" }, - "b": { - "register": "Vm.16B" + "op2": { + "register": "Zop2.S" }, - "r": { - "register": "Vd.4S" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "UDOT" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f16", + "SIMD_ISA": "SVE", + "name": "svcmple[_s64]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcmple[_s8]", "arguments": [ - "float32x2_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_f64", + "SIMD_ISA": "SVE", + 
"name": "svcmple[_u16]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25190,356 +30413,389 @@ ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmple[_u32]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcmple[_u64]", "arguments": [ - "poly64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcmple[_u8]", "arguments": [ - "poly8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 
0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s16]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s32]", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s8]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "DUP" + "CMPLE" + ], + [ + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u16]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u32]", "arguments": [ - "uint16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u8]", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLS" + ], + [ + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s16]", 
"arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s8]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25547,27 +30803,30 @@ ], "instructions": [ [ - "DUP" + "CMPLE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u16]", "arguments": [ - "float32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - 
"maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25575,27 +30834,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u32]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25603,27 +30865,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p16", + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u8]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25631,27 +30896,30 @@ ], "instructions": [ [ - "DUP" + "CMPLS" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f16]", "arguments": [ - "poly64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25659,27 +30927,30 @@ ], "instructions": [ [ 
- "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f32]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25687,27 +30958,30 @@ ], "instructions": [ [ - "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_f64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25715,27 +30989,30 @@ ], "instructions": [ [ - "DUP" + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f16]", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25743,27 +31020,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f32]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t 
op1", + "float32_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25771,27 +31054,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f64]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25799,27 +31088,33 @@ ], "instructions": [ [ - "DUP" + "FCMLT" + ], + [ + "FCMGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s16]", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -25827,27 +31122,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s32]", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + 
"register": "Zop1.S" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -25855,27 +31156,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s64]", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25883,27 +31190,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s8]", "arguments": [ - "uint8x16_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -25911,72 +31224,101 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u16]", "arguments": [ - "float16_t value" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] 
}, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u32]", "arguments": [ - "float32_t value" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u64]", "arguments": [ - "float64_t value" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -25984,301 +31326,383 @@ ], "instructions": [ [ - "INS" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u8]", "arguments": [ - "poly16_t value" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "poly16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s16]", "arguments": [ - "poly64_t value" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + 
"register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s32]", "arguments": [ - "poly8_t value" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "poly8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s64]", "arguments": [ - "int16_t value" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_s8]", "arguments": [ - "int32_t value" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPGT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u16]", "arguments": [ - "int64_t value" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x1_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + 
"op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u32]", "arguments": [ - "int8_t value" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u16", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u64]", "arguments": [ - "uint16_t value" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u32", + "SIMD_ISA": "SVE", + "name": "svcmplt[_u8]", "arguments": [ - "uint32_t value" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPHI" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s16]", "arguments": [ - "uint64_t value" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x1_t" + "value": 
"svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "INS" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdup_n_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s32]", "arguments": [ - "uint8_t value" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s8]", "arguments": [ - "poly8x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "poly8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26286,27 +31710,33 @@ ], "instructions": [ [ - "DUP" + "CMPLT" + ], + [ + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u16]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26314,27 +31744,33 @@ ], 
"instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26342,27 +31778,33 @@ ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u8]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" ], "return_type": { - "value": "poly8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26370,27 +31812,33 @@ ], "instructions": [ [ - "DUP" + "CMPLO" + ], + [ + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s16]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - "value": "int8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26398,27 +31846,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupb_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s32]", "arguments": [ - 
"uint8x16_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "uint8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26426,27 +31877,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s8]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26454,27 +31908,30 @@ ], "instructions": [ [ - "DUP" + "CMPLT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u16]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26482,27 +31939,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_lane_u64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u32]", "arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + 
"op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26510,27 +31970,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u8]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" ], "return_type": { - "value": "float64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26538,27 +32001,30 @@ ], "instructions": [ [ - "DUP" + "CMPLO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_s64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_f16]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "int64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26566,27 +32032,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupd_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_f32]", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "uint64_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26594,27 +32063,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] 
}, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_f64]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26622,27 +32094,30 @@ ], "instructions": [ [ - "DUP" + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f16]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "poly16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26650,27 +32125,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f32]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26678,27 +32159,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f64]", "arguments": [ - "uint16x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t 
op1", + "float64_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26706,27 +32193,33 @@ ], "instructions": [ [ - "DUP" + "FCMNE" + ], + [ + "FCMNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s16]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int16_t op2" ], "return_type": { - "value": "float16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -26734,27 +32227,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s32]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int32_t op2" ], "return_type": { - "value": "poly16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -26762,27 +32261,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "int64_t op2" ], "return_type": { - "value": "int16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + 
"register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26790,27 +32295,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vduph_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s8]", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int8_t op2" ], "return_type": { - "value": "uint16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -26818,87 +32329,101 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u16]", "arguments": [ - "float16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u32]", "arguments": [ - "float32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" 
], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_f64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u64]", "arguments": [ - "float64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -26906,356 +32431,383 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u8]", "arguments": [ - "poly16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" ], "return_type": { - "value": "poly16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s16]", "arguments": [ - "poly64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_p8", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s32]", "arguments": [ - "poly8x8_t vec", - "const 
int lane" + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s64]", "arguments": [ - "int16x4_t vec", - "const int lane" + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_s8]", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s64", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u16]", "arguments": [ - "int64x1_t vec", - "const int lane" + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - 
"register": "Vn.1D" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_s8", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u32]", "arguments": [ - "int8x8_t vec", - "const int lane" + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u16", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u64]", "arguments": [ - "uint16x4_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" ], "return_type": { - "value": "uint16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.4H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcmpne[_u8]", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" ], "return_type": { - "value": "uint32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.2S" + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u64", + "SIMD_ISA": 
"SVE", + "name": "svcmpne_wide[_n_s16]", "arguments": [ - "uint64x1_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "int64_t op2" ], "return_type": { - "value": "uint64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.1D" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_lane_u8", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s32]", "arguments": [ - "uint8x8_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "int64_t op2" ], "return_type": { - "value": "uint8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.8B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f16", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s8]", "arguments": [ - "float16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "int64_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -27263,27 +32815,33 @@ ], "instructions": [ [ - "DUP" + "CMPNE" + ], + [ + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s16]", "arguments": [ - "float32x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" ], "return_type": { - 
"value": "float32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27291,27 +32849,30 @@ ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_f64", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s32]", "arguments": [ - "float64x2_t vec", - "const int lane" + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27319,27 +32880,30 @@ ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p16", + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s8]", "arguments": [ - "poly16x8_t vec", - "const int lane" + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" ], "return_type": { - "value": "poly16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.B" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -27347,27 +32911,30 @@ ], "instructions": [ [ - "DUP" + "CMPNE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p64", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f16]", "arguments": [ - "poly64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" ], "return_type": { - "value": "poly64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.H" + }, 
+ "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27375,27 +32942,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_p8", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f32]", "arguments": [ - "poly8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" ], "return_type": { - "value": "poly8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27403,27 +32973,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s16", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f64]", "arguments": [ - "int16x8_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" ], "return_type": { - "value": "int16x8_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.8H" + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -27431,27 +33004,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f16]", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" ], "return_type": { - "value": "int32x4_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op1": { + "register": "Zop1.H" }, - "vec": { - "register": "Vn.4S" + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27459,27 +33035,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s64", + "SIMD_ISA": "SVE", + "name": 
"svcmpuo[_n_f32]", "arguments": [ - "int64x2_t vec", - "const int lane" + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" ], "return_type": { - "value": "int64x2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op1": { + "register": "Zop1.S" }, - "vec": { - "register": "Vn.2D" + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27487,27 +33066,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_s8", + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f64]", "arguments": [ - "int8x16_t vec", - "const int lane" + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" ], "return_type": { - "value": "int8x16_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "op1": { + "register": "Zop1.D" }, - "vec": { - "register": "Vn.16B" + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -27515,27 +33097,30 @@ ], "instructions": [ [ - "DUP" + "FCMUO" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_m", "arguments": [ - "uint16x8_t vec", - "const int lane" + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "vec": { - "register": "Vn.8H" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27543,27 +33128,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_x", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svint16_t" }, 
"Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.H|Ztied.H" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27571,27 +33159,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_z", "arguments": [ - "uint64x2_t vec", - "const int lane" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.H" }, - "vec": { - "register": "Vn.2D" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -27599,27 +33190,31 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_laneq_u8", + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_m", "arguments": [ - "uint8x16_t vec", - "const int lane" + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "vec": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -27627,72 +33222,93 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_f16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_x", "arguments": [ - "float16_t value" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "float16x8_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "vdupq_n_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_z", "arguments": [ - "float32_t value" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "float32x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_f64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_m", "arguments": [ - "float64_t value" + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "float64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -27700,301 +33316,375 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_p16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_x", "arguments": [ - "poly16_t value" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "poly16x8_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_p64", + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_z", "arguments": [ - "poly64_t value" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "poly64x2_t" + "value": "svint64_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - 
"SIMD_ISA": "Neon", - "name": "vdupq_n_p8", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_m", "arguments": [ - "poly8_t value" + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "poly8x16_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s16", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_x", "arguments": [ - "int16_t value" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_z", "arguments": [ - "int32_t value" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s64", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_m", "arguments": [ - "int64_t value" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_s8", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_x", "arguments": [ - "int8_t value" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u16", + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_z", "arguments": [ - "uint16_t value" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_m", "arguments": [ - "uint32_t value" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u64", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_x", "arguments": [ - "uint64_t value" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": 
"Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdupq_n_u8", + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_z", "arguments": [ - "uint8_t value" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_m", "arguments": [ - "float32x2_t vec", - "const int lane" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "float32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "vec": { - "register": "Vn.2S" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28002,27 +33692,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_x", "arguments": [ - "int32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.D|Ztied.D" }, - "vec": { - "register": "Vn.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28030,27 +33723,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_lane_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_z", "arguments": [ - "uint32x2_t vec", - "const int lane" + "svbool_t pg", + "svuint64_t op" ], 
"return_type": { - "value": "uint32_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "op": { + "register": "Zop.D" }, - "vec": { - "register": "Vn.2S" + "pg": { + "register": "Pg.D" } }, "Architectures": [ @@ -28058,27 +33754,31 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_f32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_m", "arguments": [ - "float32x4_t vec", - "const int lane" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "float32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "vec": { - "register": "Vn.4S" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -28086,27 +33786,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_s32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_x", "arguments": [ - "int32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.B|Ztied.B" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -28114,27 +33817,30 @@ ], "instructions": [ [ - "DUP" + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vdups_laneq_u32", + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_z", "arguments": [ - "uint32x4_t vec", - "const int lane" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "uint32_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "op": { + "register": "Zop.B" }, - "vec": { - "register": "Vn.4S" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ 
-28142,779 +33848,845 @@ ], "instructions": [ [ - "DUP" + "MOVPRFX", + "CNOT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_m", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_x", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_z", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "int64x2_t c" + "svbool_t pg", + "svfloat16_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.H" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_m", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "int8x16_t" + "value": 
"svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_x", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_z", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "svbool_t pg", + "svfloat32_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.S" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor3q_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_m", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "uint64x2_t c" + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "veor3q_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_x", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": {}, - "c": {} + "pg": { + "register": "Pg.D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "EOR3" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_z", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "svbool_t pg", + "svfloat64_t op" ], "return_type": { - "value": "int16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_m", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int32x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_x", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int64x1_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - 
"register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_z", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "svbool_t pg", + "svint16_t op" ], "return_type": { - "value": "int8x8_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_m", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_x", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "svbool_t pg", + "svint32_t op" ], "return_type": { - "value": "uint32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_z", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "svbool_t pg", + "svint32_t op" 
], "return_type": { - "value": "uint64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veor_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_m", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "uint8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_x", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int16x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D|Ztied.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_z", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svint64_t op" ], "return_type": { - "value": "int32x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.D" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": 
"Neon", - "name": "veorq_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_m", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int64x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_s8", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_x", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "int8x16_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.B|Ztied.B" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u16", + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_z", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "svbool_t pg", + "svint8_t op" ], "return_type": { - "value": "uint16x8_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.B" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_m", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint32x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "inactive": { + "register": 
"Zinactive.H|Ztied.H" }, - "b": { - "register": "Vm.16B" + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_x", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint64x2_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H|Ztied.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "veorq_u8", + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_z", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "svbool_t pg", + "svuint16_t op" ], "return_type": { - "value": "uint8x16_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "op": { + "register": "Zop.H" }, - "b": { - "register": "Vm.16B" + "pg": { + "register": "Pg.H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EOR" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_f16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_m", "arguments": [ - "float16x4_t a", - "float16x4_t b", - "const int n" + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float16x4_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.S|Ztied.S" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.S" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - 
"name": "vext_f32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_x", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "const int n" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float32x2_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S|Ztied.S" }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_f64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_z", "arguments": [ - "float64x1_t a", - "float64x1_t b", - "const int n" + "svbool_t pg", + "svuint32_t op" ], "return_type": { - "value": "float64x1_t" + "value": "svuint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "op": { + "register": "Zop.S" }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -28922,266 +34694,198207 @@ ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_m", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", - "const int n" + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "poly16x4_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.D|Ztied.D" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_x", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "const int n" + "svbool_t 
pg", + "svuint64_t op" ], "return_type": { - "value": "poly64x1_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D|Ztied.D" }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_p8", + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_z", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", - "const int n" + "svbool_t pg", + "svuint64_t op" ], "return_type": { - "value": "poly8x8_t" + "value": "svuint64_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.D" }, - "n": { - "minimum": 0, - "maximum": 7 + "pg": { + "register": "Pg.D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s16", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_m", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "const int n" + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int16x4_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "inactive": { + "register": "Zinactive.B|Ztied.B" }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 0, - "maximum": 3 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s32", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_x", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int32x2_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" 
+ "op": { + "register": "Zop.B|Ztied.B" }, - "n": { - "minimum": 0, - "maximum": 1 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNT" + ], + [ + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s64", + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_z", "arguments": [ - "int64x1_t a", - "int64x1_t b", - "const int n" + "svbool_t pg", + "svuint8_t op" ], "return_type": { - "value": "int64x1_t" + "value": "svuint8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "op": { + "register": "Zop.B" }, - "n": { - "minimum": 0, - "maximum": 0 + "pg": { + "register": "Pg.B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "MOVPRFX", + "CNT" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_s8", - "arguments": [ - "int8x8_t a", - "int8x8_t b", - "const int n" - ], + "SIMD_ISA": "SVE", + "name": "svcntb", + "arguments": [], "return_type": { - "value": "int8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - }, - "n": { - "minimum": 0, - "maximum": 7 - } + "value": "uint64_t" }, + "Arguments_Preparation": {}, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "EXT" + "CNTB" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vext_u16", + "SIMD_ISA": "SVE", + "name": "svcntb_pat", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "const int n" + "enum svpattern pattern" ], "return_type": { - "value": "uint16x4_t" + "value": "uint64_t" }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" - }, - "n": { + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.H" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.S" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.D" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.B" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f32]", + "arguments": [ + 
"svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate2[_b]", + "arguments": [ + "svbool_t x", + "svbool_t y" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ 
+ "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u16]", 
+ "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate4[_b]", + "arguments": [ + "svbool_t x", + "svbool_t y", + "svbool_t z", + "svbool_t w" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2", + "svfloat16_t x3" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2", + "svfloat32_t x3" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2", + "svfloat64_t x3" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2", + "svint16_t x3" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + 
"A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2", + "svint32_t x3" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2", + "svint64_t x3" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2", + "svint8_t x3" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2", + "svuint16_t x3" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2", + "svuint32_t x3" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2", + "svuint64_t x3" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2", + "svuint8_t x3" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + 
}, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + 
}, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, 
+ "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_x", + "arguments": [ 
+ "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + 
"svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svcvt_f64[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svcvt_s16[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], 
+ [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + 
"register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + 
"value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_m", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_x", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_x", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t 
op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_x", + 
"arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + 
"op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + 
], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + 
"float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + 
"register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + 
[ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": 
"Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svdiv[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": 
"svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svdivr[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": 
{ + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svdivr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t zm" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s64]", + "arguments": [ + "svint64_t op1", + 
"svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { 
+ "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t 
zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + 
"svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b16", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b32", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b64", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b8", + 
"arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16", + "arguments": [ + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_x", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_z", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32", + "arguments": [ + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_x", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_z", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64", + "arguments": [ + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + 
"Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_x", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_z", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16", + "arguments": [ + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svdup[_n]_s16_x", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_z", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32", + "arguments": [ + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_x", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + 
"DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_z", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64", + "arguments": [ + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_x", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_z", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8", + "arguments": [ + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_x", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_z", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16", + "arguments": [ + "uint16_t op" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_x", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_z", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32", + "arguments": [ + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svdup[_n]_u32_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_x", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_z", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64", + "arguments": [ + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_x", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_z", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8", + "arguments": [ + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_x", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_z", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint16_t index" + ], + "return_type": { + 
"value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + 
"index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b16", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b32", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b64", + "arguments": [ + "bool x0", + "bool x1" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b8", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7", + "bool x8", + "bool x9", + "bool x10", + "bool x11", + "bool x12", + "bool x13", + "bool x14", + "bool x15" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f16", + "arguments": [ 
+ "float16_t x0", + "float16_t x1", + "float16_t x2", + "float16_t x3", + "float16_t x4", + "float16_t x5", + "float16_t x6", + "float16_t x7" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f32", + "arguments": [ + "float32_t x0", + "float32_t x1", + "float32_t x2", + "float32_t x3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f64", + "arguments": [ + "float64_t x0", + "float64_t x1" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s16", + "arguments": [ + "int16_t x0", + "int16_t x1", + "int16_t x2", + "int16_t x3", + "int16_t x4", + "int16_t x5", + "int16_t x6", + "int16_t x7" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s32", + "arguments": [ + "int32_t x0", + "int32_t x1", + "int32_t x2", + "int32_t x3" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s64", + "arguments": [ + "int64_t x0", + "int64_t x1" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s8", + "arguments": [ + "int8_t x0", + "int8_t x1", + "int8_t x2", + "int8_t x3", + "int8_t x4", + "int8_t x5", + "int8_t x6", + "int8_t x7", + "int8_t x8", + "int8_t x9", + "int8_t x10", + "int8_t x11", + "int8_t x12", + "int8_t x13", + "int8_t x14", + "int8_t x15" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u16", + "arguments": [ + "uint16_t x0", + "uint16_t x1", + "uint16_t x2", + "uint16_t x3", + "uint16_t x4", + "uint16_t x5", + "uint16_t x6", + "uint16_t x7" + ], + "return_type": { + 
"value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u32", + "arguments": [ + "uint32_t x0", + "uint32_t x1", + "uint32_t x2", + "uint32_t x3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u64", + "arguments": [ + "uint64_t x0", + "uint64_t x1" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u8", + "arguments": [ + "uint8_t x0", + "uint8_t x1", + "uint8_t x2", + "uint8_t x3", + "uint8_t x4", + "uint8_t x5", + "uint8_t x6", + "uint8_t x7", + "uint8_t x8", + "uint8_t x9", + "uint8_t x10", + "uint8_t x11", + "uint8_t x12", + "uint8_t x13", + "uint8_t x14", + "uint8_t x15" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": 
{ + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svdupq_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": 
{ + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": 
"svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + 
"value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + 
"return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_z", + "arguments": [ + 
"svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + 
"register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + 
"EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_m", + "arguments": [ + "svbool_t pg", + 
"svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_m", + "arguments": [ + 
"svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"sveorbt[_n_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + 
"register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" 
+ ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s16]", + "arguments": [ + "svint16_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u64]", + 
"arguments": [ + "svuint64_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s16]", + "arguments": [ + "svint16_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": 
"Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u64]", + "arguments": [ + "svuint64_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + 
"MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s32]", + "arguments": [ + "svint32_t op1", + 
"svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + 
"register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_z", + "arguments": 
[ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_x", + "arguments": 
[ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { 
+ "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + 
}, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + 
}, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + 
"register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" 
+ }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f64]", + "arguments": [ 
+ "svfloat64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svget3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index" + ], 
+ "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t 
op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + 
"MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t 
op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, 
+ "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + 
"MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t 
op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { 
+ "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], 
+ "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" 
+ }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + 
"SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], 
+ [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svhsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": 
"Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], 
+ "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { 
+ "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + 
"MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + 
"svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" 
+ }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + 
"MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t 
op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s16", + "arguments": [ + "int16_t base", + "int16_t step" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s32", + "arguments": [ + "int32_t base", + "int32_t step" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ 
+ "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s64", + "arguments": [ + "int64_t base", + "int64_t step" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s8", + "arguments": [ + "int8_t base", + "int8_t step" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u16", + "arguments": [ + "uint16_t base", + "uint16_t step" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u32", + "arguments": [ + "uint32_t base", + "uint32_t step" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u64", + "arguments": [ + "uint64_t base", + "uint64_t step" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u8", + "arguments": [ + "uint8_t base", + "uint8_t step" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f16]", + "arguments": [ + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f64]", + "arguments": [ + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svinsr[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": 
"uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + 
], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ 
+ "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svld1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + 
"register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svld1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" 
+ }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + 
"register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_u32", 
+ "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + 
], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t 
*base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + 
"LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + 
"Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + 
"register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { 
+ "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u8]", + 
"arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], 
+ [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svld1rq[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1rq[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": 
"Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + 
"const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t 
vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": 
"svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": 
{ + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" 
+ }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t 
vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s32", + "arguments": 
[ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svld1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t 
pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svld1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" 
+ }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_s32", + 
"arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" 
+ }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + 
}, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + 
"return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + 
"LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], 
+ [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svld3[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { 
+ "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + 
"int64_t vnum" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + 
"base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + 
"LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f32]", + 
"arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": 
"Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u8]", + 
"arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], 
+ "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + 
], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + 
"register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const 
uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + 
"register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svldff1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + 
"bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_s64", + "arguments": [ + 
"svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": 
"Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { 
+ "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svldff1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldff1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + 
"register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + 
"svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": 
"Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_s32", + "arguments": [ + 
"svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + 
"pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * 
svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + 
"value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t 
pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { 
+ "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const 
uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": 
{ + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svldff1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { 
+ "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], 
+ "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svldnf1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, 
+ "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldnf1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const 
int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svldnf1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t 
*base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": 
"Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + 
}, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + 
], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x2", + "arguments": [ + "svcount_t png", + 
"uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_f32", + "arguments": [ 
+ "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const 
int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", 
+ "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + 
}, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x2", + "arguments": [ + 
"svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": 
"Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x2", + 
"arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": 
"Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x2", + 
"arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + 
"register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svldnt1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": 
{ + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + 
"const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + 
"register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + 
"svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svldnt1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": 
{ + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + 
"const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": 
"Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svldnt1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": 
"Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f16]", + "arguments": [ + 
"svfloat16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u32]", + "arguments": [ + "svuint32_t op" + ], + 
"return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_z", + "arguments": [ + "svbool_t pg", 
+ "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svlsl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + 
"value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + 
"MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + 
"svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, 
+ "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svlsl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": 
{ + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + 
[ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t 
op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t 
op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": 
"svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": 
{ + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + 
"uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" 
+ ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + 
"value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + 
"uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": 
"svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": 
"Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + 
"value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_z", + "arguments": [ + "svbool_t 
pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + 
"MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + 
"op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + 
"svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ 
+ "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + 
"return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_m", 
+ "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" 
+ ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": 
"Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_m", + 
"arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_x", + 
"arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": 
{ + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + 
"SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t 
op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + 
"op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_x", + "arguments": [ + "svbool_t pg", 
+ "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmax[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_z", + "arguments": [ + "svbool_t pg", + 
"svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + 
{ + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t 
op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmaxnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" 
+ ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + 
"value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s32]", + "arguments": [ + 
"svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svmaxv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmin[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_x", + "arguments": [ + 
"svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ 
+ "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svmin[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", 
+ "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { 
+ "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", 
+ "name": "svmin[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_x", + 
"arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": 
"svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svminnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_m", + "arguments": [ + "svbool_t pg", + 
"svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": 
"Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svminp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, 
+ "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svminp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s16]", + "arguments": [ + "svbool_t pg", + 
"svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u64]", 
+ "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": 
"Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + 
"svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, 
+ "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": 
"svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmla[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": 
"Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" 
+ }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmla[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + 
"register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_m", + "arguments": [ + "svbool_t 
pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmla[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, 
+ "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t 
imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svmla_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + 
"value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + 
] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + 
"maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", 
+ "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + 
"MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": 
"Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + 
"op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" 
+ }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmls[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": 
{ + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_z", + 
"arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + 
"MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" 
+ }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t 
op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmls_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": 
"Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, 
+ "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + 
"register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svmlslb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + 
"register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + 
"value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": 
"Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t 
imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": 
"svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMMLA" + ], + [ + "MOVPRFX", + "SMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMMLA" + ], + [ + "MOVPRFX", + "UMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmov[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u16]", 
+ "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svmovlt[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": 
"svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_x", + 
"arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": 
{ + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_m", + "arguments": [ + "svbool_t pg", + 
"svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + 
}, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + 
"svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { 
+ "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_x", + "arguments": [ + "svbool_t pg", + 
"svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + 
], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + 
"pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmul[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": 
"svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", 
+ "name": "svmul[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmul[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" 
+ ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", 
+ "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + 
"pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t 
op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t 
op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + 
"register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + 
"uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + 
"register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + 
"MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + 
"value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svmulh[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { 
+ "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": 
{ + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_x", + "arguments": [ + 
"svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnand[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NAND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + 
"MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + 
"FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + 
"op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + 
"value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ 
+ "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + 
"register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], 
+ "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svnmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": 
"svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svnmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + 
"value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svnmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svnmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + 
"register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svnot[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + 
] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svnot[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_m", + "arguments": [ + "svuint32_t 
inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_z", + 
"arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorn[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], 
+ "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_z", + "arguments": [ 
+ "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + 
"register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + 
"ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_m", + "arguments": [ + "svbool_t pg", + 
"svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + 
"register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_m", + "arguments": [ + 
"svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c16_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfalse[_b]", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpfalse_c", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfirst[_b]", + "arguments": [ + 
"svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFIRST" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + 
"value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + 
}, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]", + 
"arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]_offset", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]_offset", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s32]index", + 
"arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": 
"Pg.B" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + 
"bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svprfh_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum 
svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b16", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b32", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b64", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svpsel_lane_b8", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c16", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c32", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c64", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c8", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svptest_any", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_first", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_last", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b16", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b32", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b64", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b8", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c16", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c32", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c64", + "arguments": [], + 
"return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c8", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b16", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b32", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b64", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b8", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": 
"svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + 
"value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + 
"MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ 
+ "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, 
+ "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": 
"SVE", + "name": "svqadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, 
+ "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + 
"svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqcvtn_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_u32_x2]", + "arguments": [ + "svuint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_u64]", + "arguments": [ + "svuint64_t op", + 
"uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_s64]", + "arguments": [ 
+ "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u64]", + "arguments": [ + 
"uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u32]", + "arguments": [ + 
"uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svqdecp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b8", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + 
}, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s64]", + "arguments": 
[ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" 
+ }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + 
"imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqdmlalbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": 
{ + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + 
"MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqdmlslbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s64]", + "arguments": [ + "svint64_t 
op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqdmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": 
{ + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + 
"register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svqincd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_u64]", + "arguments": [ + "svuint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_s64]", + "arguments": [ + "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + 
"Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b8", + "arguments": [ + 
"uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], 
+ [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s64]", + "arguments": [ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svqincw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + "MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + 
"uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + "MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + 
] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], 
+ [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ 
+ "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + 
"return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s16]", 
+ "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": 
{ + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + 
"register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqrdmulh[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": 
"Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + 
"int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + 
"svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + 
"value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" 
+ ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t 
op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": 
"svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": 
{ + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": 
{ + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] 
+ ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_u16[_u32_x2]", + "arguments": [ + "svuint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + 
], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrun[_n]_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqrshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": 
"Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": 
"Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + 
"register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": 
"Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + 
[ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + 
"svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": 
"Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + 
] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + 
"pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svqshlu[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + 
"maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, 
+ "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + 
"minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + 
"return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": 
{ + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t 
op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + 
"svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { 
+ "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_x", + "arguments": [ + 
"svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": 
"Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_z", + "arguments": [ + 
"svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { 
+ "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + 
"UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + 
"op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_z", + "arguments": [ + "svbool_t 
pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, 
+ "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ 
+ "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": 
"Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + 
"MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svqsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + 
"svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op" + ], + 
"return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + 
"Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": 
"Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], 
+ "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + 
"return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, 
+ "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr_z", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + 
"MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_x", + 
"arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_m", + "arguments": [ + "svfloat64_t 
inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_b]", + "arguments": [ + "svcount_t count" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_c]", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svcount_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + 
"value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svreinterpret_f32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + 
}, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f32]", + "arguments": 
[ + "svfloat32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", 
+ "name": "svreinterpret_s32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f16]", + "arguments": [ + "svfloat16_t op" + 
], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f16]", + 
"arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, 
+ { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svreinterpret_u64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" 
+ ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s8]", + "arguments": [ + "svint8_t op" + 
], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b16", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b32", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b64", + "arguments": [ + "svbool_t op" + ], + 
"return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b8", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_m", + "arguments": [ + "svfloat16_t zd", + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_m", + "arguments": [ + "svfloat32_t zd", + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_m", + "arguments": [ + "svfloat64_t zd", + 
"svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_m", + "arguments": [ + "svint16_t zd", + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": 
"Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_m", + "arguments": [ + "svint32_t zd", + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_m", + "arguments": [ + "svint64_t zd", + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_m", + "arguments": [ + "svint8_t zd", + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_m", + "arguments": [ + "svuint16_t zd", + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": 
"svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_m", + "arguments": [ + "svuint32_t zd", + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_m", + "arguments": [ + "svuint64_t zd", + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_m", + "arguments": [ + "svuint8_t zd", + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", 
+ "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_z", + "arguments": [ + "svbool_t 
pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_x", + "arguments": [ + 
"svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + 
"return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + 
"value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + 
"pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + 
"MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t 
op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_z", + 
"arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_m", + 
"arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": 
"Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + 
"register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, 
+ "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + 
"return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_z", + 
"arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svrintn[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svrintp[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ 
+ "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": 
"Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + 
"register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t 
op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t 
op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" 
+ }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { 
+ "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": 
"svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t 
imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { 
+ "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f64]", + "arguments": [ + 
"svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s8]", + "arguments": [ + "svint8_t op1", + 
"svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + 
"maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { 
+ "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t 
op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + 
] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": 
"svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t 
op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_b]", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f64]", + "arguments": [ + "svfloat64x2_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x2_t" + 
}, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + 
"return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index", + 
"svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsetffr", + "arguments": [], + "return_type": { + "value": 
"void" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SETFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u64]", 
+ "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 
15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + 
"maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4e[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4E" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4ekey[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4EKEY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + 
"MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svsqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_x", + 
"arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + 
"FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], 
+ "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + 
"Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } 
+ }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + 
"imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * 
rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + 
"Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x4]", + "arguments": [ + 
"svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" 
+ ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, 
+ "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_s32]", + 
"arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svst1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + 
"bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + 
"value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": 
"Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svst1_scatter_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svst1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_u64]", + 
"arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t 
*base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + 
"float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x4]", + "arguments": [ + 
"svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s32]", + "arguments": [ 
+ "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x2]", + "arguments": [ + 
"svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x4]", + "arguments": [ + 
"svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u32]", + "arguments": [ 
+ "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x2]", + "arguments": [ 
+ "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x4]", + "arguments": 
[ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svst1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": 
{ + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + 
"register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { 
+ "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svst1h[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { 
+ "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" 
+ }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + 
"offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + 
}, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svst1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_s32]", + 
"arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + 
"int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint32_t 
data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": 
"Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, 
+ "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" 
+ ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" 
+ ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f32]", + "arguments": [ + 
"svbool_t pg", + "float32_t *base", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x2_t data" + ], + "return_type": { + 
"value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x3_t data" + ], + "return_type": 
{ + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ 
+ "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, 
+ "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + 
"register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B 
- Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s32]", + 
"arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": 
"Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, 
+ { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + 
], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] 
+ }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + 
"register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" 
+ }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x2]", 
+ "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + 
"register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": 
"Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_u64]", + 
"arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_f64]", + 
"arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + 
"register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": 
"Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": 
"Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, 
+ "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + 
"data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + 
"register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + 
"Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": 
"void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { 
+ "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t 
zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t 
vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", 
+ "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + 
"bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" 
+ } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svstnt1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + 
"return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + 
"indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 
4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + 
{ + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + 
"value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + 
"return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ 
+ [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_m", + "arguments": [ + "svbool_t 
pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_x", + 
"arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + 
"register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + 
}, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { 
+ "SIMD_ISA": "SVE", + "name": "svsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { 
+ "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": 
"Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": 
{ + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { 
+ "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + 
}, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + 
"op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s16]", + "arguments": [ + "svint8_t op1", + 
"int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { 
+ "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s64]", + 
"arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + 
"pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + 
"FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svsubr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + 
"value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t 
op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + 
"register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + 
], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + 
"return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t 
op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + 
"register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_m", + 
"arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + 
"op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": 
"svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + 
"SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + 
"op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUDOT" + ], + [ + "MOVPRFX", + "SUDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f16]", + "arguments": [ + "svfloat16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f32]", + "arguments": [ + "svfloat32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f64]", + "arguments": [ + "svfloat64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s16]", + "arguments": [ + "svint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": 
"{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s32]", + "arguments": [ + "svint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s64]", + "arguments": [ + "svint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s8]", + "arguments": [ + "svint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u16]", + "arguments": [ + "svuint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u32]", + "arguments": [ + "svuint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u64]", + "arguments": [ + "svuint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u8]", + "arguments": [ + "svuint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f16]", + "arguments": [ + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f32]", + "arguments": [ + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f64]", + "arguments": [ + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s16]", + "arguments": [ + "svint16_t data", 
+ "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s32]", + "arguments": [ + "svint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s64]", + "arguments": [ + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s8]", + "arguments": [ + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u16]", + "arguments": [ + "svuint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u32]", + "arguments": [ + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u64]", + "arguments": [ + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u8]", + "arguments": [ + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f16]", + "arguments": [ + "svfloat16_t fallback", + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f32]", + "arguments": [ + "svfloat32_t fallback", + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f64]", + "arguments": [ + "svfloat64_t fallback", + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + 
"indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s16]", + "arguments": [ + "svint16_t fallback", + "svint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s32]", + "arguments": [ + "svint32_t fallback", + "svint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s64]", + "arguments": [ + "svint64_t fallback", + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s8]", + "arguments": [ + "svint8_t fallback", + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u16]", + "arguments": [ + "svuint16_t fallback", + "svuint16_t data", + "svuint16_t indices" + ], + 
"return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u32]", + "arguments": [ + "svuint32_t fallback", + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u64]", + "arguments": [ + "svuint64_t fallback", + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u8]", + "arguments": [ + "svuint8_t fallback", + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ 
+ "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": 
"Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": 
"svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svtrn1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b16", + "arguments": [ 
+ "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + 
"op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], 
+ "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef2_b", + "arguments": [], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s16", + 
"arguments": [], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s32", + "arguments": [], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s64", + "arguments": [], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s8", + "arguments": [], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u16", + "arguments": [], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u32", + "arguments": [], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u64", + "arguments": [], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u8", + "arguments": [], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s16", + "arguments": [], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s32", + "arguments": [], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svundef3_s64", + "arguments": [], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s8", + "arguments": [], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u16", + "arguments": [], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u32", + "arguments": [], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u64", + "arguments": [], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u8", + "arguments": [], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef4_b", + "arguments": [], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s16", + "arguments": [], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s32", + "arguments": [], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s64", + "arguments": [], + "return_type": { + "value": "svint64x4_t" + 
}, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s8", + "arguments": [], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u16", + "arguments": [], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u32", + "arguments": [], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u64", + "arguments": [], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u8", + "arguments": [], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f16", + "arguments": [], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f32", + "arguments": [], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f64", + "arguments": [], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s16", + "arguments": [], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s32", + "arguments": [], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s64", + "arguments": [], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s8", + "arguments": [], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u16", + "arguments": [], + "return_type": { + 
"value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u32", + "arguments": [], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u64", + "arguments": [], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u8", + "arguments": [], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_b]", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svunpkhi[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_b]", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svuqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": 
"Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + 
"svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + 
"return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USMMLA" + ], + [ + "MOVPRFX", + "USMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + 
"op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + 
"value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b8", + 
"arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + 
"op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s64]", + "arguments": [ + "svint64_t 
op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + 
] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": 
"Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": 
"svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svwhilege_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + 
}, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": 
"svwhilege_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, 
+ "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + 
}, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", 
+ "name": "svwhilegt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + 
}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": 
"svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + 
"name": "svwhilegt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + 
"register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + 
"register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + 
"SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + 
"register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": 
{ + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svwhilele_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + 
}, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + 
}, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": 
"SVE2", + "name": "svwhilelt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + 
} + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": 
"svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": 
"svwhilelt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], 
+ "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + 
"return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + "return_type": { + 
"value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwrffr", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "op": { + 
"register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WRFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ 
+ "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svzip1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, 
+ "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f64]", + "arguments": [ + 
"svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": 
[ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + 
"register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], 
+ "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + 
"name": "svzip2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + 
"Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint32x2_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaba_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s16", + "arguments": [ + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s32", + "arguments": [ + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_s8", + "arguments": [ + "int16x8_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u16", + "arguments": [ + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": 
"uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u32", + "arguments": [ + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_high_u8", + "arguments": [ + "uint16x8_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABAL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s16", + "arguments": [ + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s32", + "arguments": [ + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_s8", + "arguments": [ + "int16x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + 
"value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u16", + "arguments": [ + "uint32x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u32", + "arguments": [ + "uint64x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabal_u8", + "arguments": [ + "uint16x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABAL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t 
b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabaq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f16", + 
"arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], 
+ "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabd_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + 
"register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_high_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", 
+ "name": "vabdq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabdq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabds_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabs_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vabsq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": 
"int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ABS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p16", + "arguments": [ + "poly16x4_t a", + "poly16x4_t b" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_p8", + "arguments": [ + 
"poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + 
"instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vadd_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + 
"Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s16", + "arguments": [ + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s32", + "arguments": [ + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_s64", + "arguments": [ + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u16", + "arguments": [ + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u32", + "arguments": [ + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_high_u64", + "arguments": [ + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u32", + "arguments": [ + "uint32x4_t a", + 
"uint32x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddhn_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADDHN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] 
+ }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_high_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": 
"Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vaddlv_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlv_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddlvq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vaddlvq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p128", + "arguments": [ + "poly128_t a", + "poly128_t b" + ], + "return_type": { + "value": "poly128_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p16", + "arguments": [ + "poly16x8_t a", + "poly16x8_t b" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + 
"EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" 
+ }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddv_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float64_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "uint64_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddvq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s16", + "arguments": [ + "int32x4_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s32", + "arguments": [ + "int64x2_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_s8", + "arguments": [ + "int16x8_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u16", + "arguments": [ + "uint32x4_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u32", + "arguments": [ + "uint64x2_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": 
{ + "register": "Vn.2D" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_high_u8", + "arguments": [ + "uint16x8_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDW2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s16", + "arguments": [ + "int32x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s32", + "arguments": [ + "int64x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_s8", + "arguments": [ + "int16x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u16", + "arguments": [ + "uint32x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u32", + "arguments": [ + "uint64x2_t a", + 
"uint32x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaddw_u8", + "arguments": [ + "uint16x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UADDW" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaesdq_u8", + "arguments": [ + "uint8x16_t data", + "uint8x16_t key" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vd.16B" + }, + "key": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaeseq_u8", + "arguments": [ + "uint8x16_t data", + "uint8x16_t key" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vd.16B" + }, + "key": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaesimcq_u8", + "arguments": [ + "uint8x16_t data" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESIMC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaesmcq_u8", + "arguments": [ + "uint8x16_t data" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "AESMC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vamax_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamax_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + 
], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": 
"Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vand_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" 
+ ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ 
+ [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vandq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b", + "int64x2_t c" + 
], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbcaxq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { 
+ "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + 
] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbic_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { 
+ "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbicq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f16", + "arguments": [ + "uint16x4_t a", + 
"float16x4_t b", + "float16x4_t c" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f32", + "arguments": [ + "uint32x2_t a", + "float32x2_t b", + "float32x2_t c" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_f64", + "arguments": [ + "uint64x1_t a", + "float64x1_t b", + "float64x1_t c" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p16", + "arguments": [ + "uint16x4_t a", + "poly16x4_t b", + "poly16x4_t c" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b", + "poly64x1_t c" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_p8", + 
"arguments": [ + "uint8x8_t a", + "poly8x8_t b", + "poly8x8_t c" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s16", + "arguments": [ + "uint16x4_t a", + "int16x4_t b", + "int16x4_t c" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s32", + "arguments": [ + "uint32x2_t a", + "int32x2_t b", + "int32x2_t c" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s64", + "arguments": [ + "uint64x1_t a", + "int64x1_t b", + "int64x1_t c" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_s8", + "arguments": [ + "uint8x8_t a", + "int8x8_t b", + "int8x8_t c" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vbsl_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b", + "uint64x1_t c" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbsl_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f16", + "arguments": [ + "uint16x8_t a", + "float16x8_t b", + "float16x8_t c" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + 
"BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f32", + "arguments": [ + "uint32x4_t a", + "float32x4_t b", + "float32x4_t c" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_f64", + "arguments": [ + "uint64x2_t a", + "float64x2_t b", + "float64x2_t c" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p16", + "arguments": [ + "uint16x8_t a", + "poly16x8_t b", + "poly16x8_t c" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b", + "poly64x2_t c" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_p8", + "arguments": [ + "uint8x16_t a", + "poly8x16_t b", + "poly8x16_t c" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + 
"A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s16", + "arguments": [ + "uint16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s32", + "arguments": [ + "uint32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s64", + "arguments": [ + "uint64x2_t a", + "int64x2_t b", + "int64x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_s8", + "arguments": [ + "uint8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": 
"Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vbslq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "BSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot270_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot270_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + 
"A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot90_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcadd_rot90_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot270_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot90_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": 
"float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot90_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaddq_rot90_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcage_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vcaged_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcageq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcages_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + 
"Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagtq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcagts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcale_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaled_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaleq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcales_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], 
+ "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f16", + "arguments": 
[ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcaltq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcalts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": 
[ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": 
"Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceq_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + 
"value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vceqq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqs_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_p64", + "arguments": [ + "poly64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_p8", + "arguments": [ + "poly8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u64", + "arguments": [ + "uint64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqz_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzd_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_p64", + "arguments": [ + "poly64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + 
"A32", + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_p8", + "arguments": [ + "poly8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vceqzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", 
+ "name": "vcge_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + 
"v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcge_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcged_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgeq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcges_f32", + 
"arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgez_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { 
+ "SIMD_ISA": "Neon", + "name": "vcgez_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcgezq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgezs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + 
"register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, 
+ "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgt_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + 
"return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcgtq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": 
[ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s32", + "arguments": [ + "int32x2_t a" + ], + 
"return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f32", + "arguments": [ + "float32x4_t a" + ], + 
"return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcgtzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_f16", + "arguments": [ + "float16x4_t a", + 
"float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcle_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcle_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcled_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": 
{ + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + 
"value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcleq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcles_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclez_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezh_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + 
"a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclezs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcls_u8", + "arguments": [ + "uint8x8_t a" + 
], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclsq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLS" + 
] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": 
"Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclt_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_f64", + "arguments": [ + "float64_t a", + "float64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_s64", + "arguments": [ + "int64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltd_u64", + "arguments": [ + "uint64_t a", + "uint64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclth_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_f64", + "arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + 
"value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vcltq_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMHI" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclts_f32", + "arguments": [ + "float32_t a", + "float32_t b" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_f64", + 
"arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzd_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzd_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzh_f16", + "arguments": [ + 
"float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzq_s8", + 
"arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcltzs_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + 
] + }, + { + "SIMD_ISA": "Neon", + "name": "vclz_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vclzq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" 
+ } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + 
"float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { 
+ "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot180_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + 
"instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot270_laneq_f32", + "arguments": [ + 
"float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_lane_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_lane_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": 
{ + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_laneq_f16", + "arguments": [ + "float16x4_t r", + "float16x4_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmla_rot90_laneq_f32", + "arguments": [ + "float32x2_t r", + "float32x2_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + 
{ + "SIMD_ISA": "Neon", + "name": "vcmlaq_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + 
"return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + 
], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot180_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f32", + "arguments": 
[ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_lane_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + 
"b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot270_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_f64", + "arguments": [ + "float64x2_t r", + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_lane_f16", + 
"arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x4_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_lane_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x2_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_laneq_f16", + "arguments": [ + "float16x8_t r", + "float16x8_t a", + "float16x8_t b", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcmlaq_rot90_laneq_f32", + "arguments": [ + "float32x4_t r", + "float32x4_t a", + "float32x4_t b", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_p8", + "arguments": [ + "poly8x8_t a" + ], + "return_type": { + "value": 
"poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_s8", + "arguments": [ + "int8x8_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcnt_u8", + "arguments": [ + "uint8x8_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_p8", + "arguments": [ + "poly8x16_t a" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_s8", + "arguments": [ + "int8x16_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcntq_u8", + "arguments": [ + "uint8x16_t a" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CNT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f16", + "arguments": [ + "float16x4_t low", + "float16x4_t high" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + 
], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f32", + "arguments": [ + "float32x2_t low", + "float32x2_t high" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_f64", + "arguments": [ + "float64x1_t low", + "float64x1_t high" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p16", + "arguments": [ + "poly16x4_t low", + "poly16x4_t high" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p64", + "arguments": [ + "poly64x1_t low", + "poly64x1_t high" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_p8", + "arguments": [ + "poly8x8_t low", + "poly8x8_t high" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s16", + "arguments": [ + 
"int16x4_t low", + "int16x4_t high" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s32", + "arguments": [ + "int32x2_t low", + "int32x2_t high" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s64", + "arguments": [ + "int64x1_t low", + "int64x1_t high" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_s8", + "arguments": [ + "int8x8_t low", + "int8x8_t high" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u16", + "arguments": [ + "uint16x4_t low", + "uint16x4_t high" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.4H" + }, + "low": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u32", + "arguments": [ + "uint32x2_t low", + "uint32x2_t high" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "high": 
{ + "register": "Vm.2S" + }, + "low": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u64", + "arguments": [ + "uint64x1_t low", + "uint64x1_t high" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.1D" + }, + "low": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcombine_u8", + "arguments": [ + "uint8x8_t low", + "uint8x8_t high" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "high": { + "register": "Vm.8B" + }, + "low": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP", + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_f32", + "arguments": [ + "float32x2_t a", + "const int lane1", + "float32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_f64", + "arguments": [ + "float64x1_t a", + "const int lane1", + "float64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p16", + "arguments": [ + "poly16x4_t a", + "const 
int lane1", + "poly16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p64", + "arguments": [ + "poly64x1_t a", + "const int lane1", + "poly64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_p8", + "arguments": [ + "poly8x8_t a", + "const int lane1", + "poly8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s16", + "arguments": [ + "int16x4_t a", + "const int lane1", + "int16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s32", + "arguments": [ + "int32x2_t a", + "const int lane1", + "int32x2_t b", + "const int lane2" + ], + 
"return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s64", + "arguments": [ + "int64x1_t a", + "const int lane1", + "int64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_s8", + "arguments": [ + "int8x8_t a", + "const int lane1", + "int8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u16", + "arguments": [ + "uint16x4_t a", + "const int lane1", + "uint16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u32", + "arguments": [ + "uint32x2_t a", + "const int lane1", + "uint32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + 
"a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u64", + "arguments": [ + "uint64x1_t a", + "const int lane1", + "uint64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_lane_u8", + "arguments": [ + "uint8x8_t a", + "const int lane1", + "uint8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_f32", + "arguments": [ + "float32x2_t a", + "const int lane1", + "float32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_f64", + "arguments": [ + "float64x1_t a", + "const int lane1", + "float64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": 
"Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p16", + "arguments": [ + "poly16x4_t a", + "const int lane1", + "poly16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p64", + "arguments": [ + "poly64x1_t a", + "const int lane1", + "poly64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_p8", + "arguments": [ + "poly8x8_t a", + "const int lane1", + "poly8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s16", + "arguments": [ + "int16x4_t a", + "const int lane1", + "int16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 
+ }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s32", + "arguments": [ + "int32x2_t a", + "const int lane1", + "int32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s64", + "arguments": [ + "int64x1_t a", + "const int lane1", + "int64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_s8", + "arguments": [ + "int8x8_t a", + "const int lane1", + "int8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u16", + "arguments": [ + "uint16x4_t a", + "const int lane1", + "uint16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u32", + "arguments": [ + "uint32x2_t a", + "const int lane1", + "uint32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u64", + "arguments": [ + "uint64x1_t a", + "const int lane1", + "uint64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "UNUSED" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 0 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopy_laneq_u8", + "arguments": [ + "uint8x8_t a", + "const int lane1", + "uint8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_f32", + "arguments": [ + "float32x4_t a", + "const int lane1", + "float32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + 
"INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_f64", + "arguments": [ + "float64x2_t a", + "const int lane1", + "float64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p16", + "arguments": [ + "poly16x8_t a", + "const int lane1", + "poly16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p64", + "arguments": [ + "poly64x2_t a", + "const int lane1", + "poly64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_p8", + "arguments": [ + "poly8x16_t a", + "const int lane1", + "poly8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vcopyq_lane_s16", + "arguments": [ + "int16x8_t a", + "const int lane1", + "int16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s32", + "arguments": [ + "int32x4_t a", + "const int lane1", + "int32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s64", + "arguments": [ + "int64x2_t a", + "const int lane1", + "int64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_s8", + "arguments": [ + "int8x16_t a", + "const int lane1", + "int8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u16", + "arguments": [ + "uint16x8_t a", + "const int 
lane1", + "uint16x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.4H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u32", + "arguments": [ + "uint32x4_t a", + "const int lane1", + "uint32x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.2S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u64", + "arguments": [ + "uint64x2_t a", + "const int lane1", + "uint64x1_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.1D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_lane_u8", + "arguments": [ + "uint8x16_t a", + "const int lane1", + "uint8x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.8B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_f32", + "arguments": [ + "float32x4_t a", + "const int lane1", + "float32x4_t b", + "const int lane2" + ], + 
"return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_f64", + "arguments": [ + "float64x2_t a", + "const int lane1", + "float64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p16", + "arguments": [ + "poly16x8_t a", + "const int lane1", + "poly16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p64", + "arguments": [ + "poly64x2_t a", + "const int lane1", + "poly64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_p8", + "arguments": [ + "poly8x16_t a", + "const int lane1", + "poly8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "poly8x16_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s16", + "arguments": [ + "int16x8_t a", + "const int lane1", + "int16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s32", + "arguments": [ + "int32x4_t a", + "const int lane1", + "int32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s64", + "arguments": [ + "int64x2_t a", + "const int lane1", + "int64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_s8", + "arguments": [ + "int8x16_t a", + "const int lane1", + "int8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + 
"b": { + "register": "Vn.16B" + }, + "lane1": { + "minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u16", + "arguments": [ + "uint16x8_t a", + "const int lane1", + "uint16x8_t b", + "const int lane2" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "lane1": { + "minimum": 0, + "maximum": 7 + }, + "lane2": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u32", + "arguments": [ + "uint32x4_t a", + "const int lane1", + "uint32x4_t b", + "const int lane2" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "lane1": { + "minimum": 0, + "maximum": 3 + }, + "lane2": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u64", + "arguments": [ + "uint64x2_t a", + "const int lane1", + "uint64x2_t b", + "const int lane2" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + }, + "lane1": { + "minimum": 0, + "maximum": 1 + }, + "lane2": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcopyq_laneq_u8", + "arguments": [ + "uint8x16_t a", + "const int lane1", + "uint8x16_t b", + "const int lane2" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "lane1": { + 
"minimum": 0, + "maximum": 15 + }, + "lane2": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_f64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_p8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s16", + "arguments": [ 
+ "uint64_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_s8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u16", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u32", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" 
+ ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcreate_u8", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Xn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_s16", + "arguments": [ + "int16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f16_u16", + "arguments": [ + "uint16x4_t a" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_s32", + "arguments": [ + "int32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f32_u32", + "arguments": [ + "uint32x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_s64", + "arguments": [ + "int64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_f64_u64", + "arguments": [ + "uint64x1_t a" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f16_f32", + "arguments": [ + "float16x4_t r", + "float32x4_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f32_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcvt_high_f32_f64", + "arguments": [ + "float32x2_t r", + "float64x2_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_high_f64_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTL2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f16_s16", + "arguments": [ + "int16x4_t a", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f16_u16", + "arguments": [ + "uint16x4_t a", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f32_s32", + "arguments": [ + "int32x2_t a", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f32_u32", + "arguments": [ + "uint32x2_t a", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f64_s64", + "arguments": [ + "int64x1_t a", + "const int n" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_f64_u64", + "arguments": [ + "uint64x1_t a", + "const int n" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s16_f16", + "arguments": [ + "float16x4_t a", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s32_f32", + "arguments": [ + "float32x2_t a", + "const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_s64_f64", + "arguments": [ + "float64x1_t a", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u16_f16", + "arguments": [ + "float16x4_t a", + "const int n" + ], + 
"return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u32_f32", + "arguments": [ + "float32x2_t a", + "const int n" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_n_u64_f64", + "arguments": [ + "float64x1_t a", + "const int n" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": 
"uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvt_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcvta_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvta_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vcvtah_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtah_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAS" 
+ ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtaq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_f64_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_f64_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_f64_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_f64_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_n_s64_f64", + "arguments": [ + "float64_t a", + "const int n" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vcvtd_n_u64_f64", + "arguments": [ + "float64_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s16", + "arguments": [ + "int16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s32", + "arguments": [ + "int32_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ 
+ [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_s64", + "arguments": [ + "int64_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_u16", + "arguments": [ + "uint16_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_u32", + "arguments": [ + "uint32_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_f16_u64", + "arguments": [ + "uint64_t a" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s16", + "arguments": [ + "int16_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s32", + "arguments": [ + "int32_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + 
"return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u16", + "arguments": [ + "uint16_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u32", + "arguments": [ + "uint32_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_f16_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s16_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s32_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + 
}, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_s64_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u16_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u32_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_n_u64_f16", + "arguments": [ + "float16_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": 
"Neon", + "name": "vcvth_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvth_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] 
+ ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtm_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmh_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", 
+ "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": 
{ + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + 
"value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtn_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s32_f16", + "arguments": [ + "float16_t a" + ], + 
"return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnh_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_s64_f64", + 
"arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vcvtns_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_s64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u16_f16", + "arguments": [ + "float16x4_t a" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u32_f32", + "arguments": [ + "float32x2_t a" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtp_u64_f64", + "arguments": [ + "float64x1_t a" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_s64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_u64_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_s64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + 
"A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u16_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u32_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtph_u64_f16", + "arguments": [ + "float16_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f16_s16", + "arguments": [ + "int16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f16_u16", + "arguments": [ + "uint16x8_t a" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f32_s32", + "arguments": [ + "int32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f32_u32", + "arguments": [ + "uint32x4_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f64_s64", + "arguments": [ + "int64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_f64_u64", + "arguments": [ + "uint64x2_t a" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f16_s16", + "arguments": [ + "int16x8_t a", + "const int n" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + 
"SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f16_u16", + "arguments": [ + "uint16x8_t a", + "const int n" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f32_s32", + "arguments": [ + "int32x4_t a", + "const int n" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f32_u32", + "arguments": [ + "uint32x4_t a", + "const int n" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f64_s64", + "arguments": [ + "int64x2_t a", + "const int n" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_f64_u64", + "arguments": [ + "uint64x2_t a", + "const int n" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s16_f16", + "arguments": [ + "float16x8_t a", + "const int n" + ], + "return_type": { + "value": "int16x8_t" + 
}, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s32_f32", + "arguments": [ + "float32x4_t a", + "const int n" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_s64_f64", + "arguments": [ + "float64x2_t a", + "const int n" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_u16_f16", + "arguments": [ + "float16x8_t a", + "const int n" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_u32_f32", + "arguments": [ + "float32x4_t a", + "const int n" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_n_u64_f64", + "arguments": [ + "float64x2_t a", + "const int n" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 + } + }, + "Architectures": [ + "A64" + ], + 
"instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_s64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u16_f16", + "arguments": [ + "float16x8_t a" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u32_f32", + "arguments": [ + "float32x4_t a" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtq_u64_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_f32_s32", + "arguments": [ + "int32_t a" + ], + "return_type": { + "value": "float32_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_f32_u32", + "arguments": [ + "uint32_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_f32_s32", + "arguments": [ + "int32_t a", + "const int n" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_f32_u32", + "arguments": [ + "uint32_t a", + "const int n" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_s32_f32", + "arguments": [ + "float32_t a", + "const int n" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_n_u32_f32", + "arguments": [ + "float32_t a", + "const int n" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 1, + "maximum": 32 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_s32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { 
+ "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_u32_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtx_f32_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtx_high_f32_f64", + "arguments": [ + "float32x2_t r", + "float64x2_t a" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vcvtxd_f32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdiv_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" 
+ ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdiv_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdiv_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivh_f16", + "arguments": [ + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f32", + "arguments": [ + "float32x4_t a", + "float32x4_t b" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdivq_f64", + 
"arguments": [ + "float64x2_t a", + "float64x2_t b" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_lane_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x8_t b", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_lane_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x8_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_laneq_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x16_t b", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_laneq_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x16_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.4B" + }, + 
"lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_s32", + "arguments": [ + "int32x2_t r", + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdot_u32", + "arguments": [ + "uint32x2_t r", + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_lane_s32", + "arguments": [ + "int32x4_t r", + "int8x16_t a", + "int8x8_t b", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_lane_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x8_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_laneq_s32", + "arguments": [ + "int32x4_t 
r", + "int8x16_t a", + "int8x16_t b", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_laneq_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_s32", + "arguments": [ + "int32x4_t r", + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdotq_u32", + "arguments": [ + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + 
], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p64", + "arguments": [ + "poly64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_p8", + "arguments": [ + "poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s16", + "arguments": [ + "int16x4_t vec", + 
"const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 
0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u64", + "arguments": [ + "uint64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + 
}, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p64", + "arguments": [ + "poly64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + 
"lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_s8", + "arguments": [ + "int8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + 
"name": "vdup_n_f16", + "arguments": [ + "float16_t value" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_f32", + "arguments": [ + "float32_t value" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_f64", + "arguments": [ + "float64_t value" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p16", + "arguments": [ + "poly16_t value" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p64", + "arguments": [ + "poly64_t value" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_p8", + "arguments": [ + "poly8_t value" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s16", + "arguments": [ + "int16_t value" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + 
"Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s32", + "arguments": [ + "int32_t value" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s64", + "arguments": [ + "int64_t value" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_s8", + "arguments": [ + "int8_t value" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u16", + "arguments": [ + "uint16_t value" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u32", + "arguments": [ + "uint32_t value" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u64", + "arguments": [ + "uint64_t value" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "INS" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdup_n_u8", + "arguments": [ + "uint8_t value" 
+ ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_p8", + "arguments": [ + "poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_s8", + "arguments": [ + "int8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + 
}, + { + "SIMD_ISA": "Neon", + "name": "vdupb_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_lane_u64", + "arguments": [ + "uint64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + 
"lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupd_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_s16", + "arguments": [ + "int16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vduph_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vduph_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f16", + "arguments": [ + "float16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "lane": { + 
"minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_f64", + "arguments": [ + "float64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p16", + "arguments": [ + "poly16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p64", + "arguments": [ + "poly64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_p8", + "arguments": [ + "poly8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s16", + "arguments": [ + "int16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + 
"instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s64", + "arguments": [ + "int64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_s8", + "arguments": [ + "int8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u16", + "arguments": [ + "uint16x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u64", + "arguments": [ + 
"uint64x1_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "vec": { + "register": "Vn.1D" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_lane_u8", + "arguments": [ + "uint8x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f16", + "arguments": [ + "float16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_f64", + "arguments": [ + "float64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p16", + "arguments": [ + "poly16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + 
"maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p64", + "arguments": [ + "poly64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_p8", + "arguments": [ + "poly8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s16", + "arguments": [ + "int16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s64", + "arguments": [ + "int64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_s8", + 
"arguments": [ + "int8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u16", + "arguments": [ + "uint16x8_t vec", + "const int lane" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, + "vec": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u64", + "arguments": [ + "uint64x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_laneq_u8", + "arguments": [ + "uint8x16_t vec", + "const int lane" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, + "vec": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f16", + "arguments": [ + "float16_t value" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + 
"A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f32", + "arguments": [ + "float32_t value" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_f64", + "arguments": [ + "float64_t value" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p16", + "arguments": [ + "poly16_t value" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p64", + "arguments": [ + "poly64_t value" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_p8", + "arguments": [ + "poly8_t value" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s16", + "arguments": [ + "int16_t value" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s32", + "arguments": [ + "int32_t value" + ], + "return_type": { + "value": 
"int32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s64", + "arguments": [ + "int64_t value" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_s8", + "arguments": [ + "int8_t value" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u16", + "arguments": [ + "uint16_t value" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u32", + "arguments": [ + "uint32_t value" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u64", + "arguments": [ + "uint64_t value" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdupq_n_u8", + "arguments": [ + "uint8_t value" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + 
] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_f32", + "arguments": [ + "float32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_s32", + "arguments": [ + "int32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_lane_u32", + "arguments": [ + "uint32x2_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "vec": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_f32", + "arguments": [ + "float32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_s32", + "arguments": [ + "int32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vdups_laneq_u32", + "arguments": [ + "uint32x4_t vec", + "const int lane" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { 
+ "lane": { + "minimum": 0, + "maximum": 3 + }, + "vec": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b", + "int64x2_t c" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + 
"register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "uint64x2_t c" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor3q_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + 
"instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u64", + "arguments": [ + "uint64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veor_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + 
"register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s32", + "arguments": [ + "int32x4_t a", + "int32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s64", + "arguments": [ + "int64x2_t a", + "int64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u32", + "arguments": [ 
+ "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "veorq_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b", + "const int n" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b", + "const int n" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_f64", + "arguments": [ + "float64x1_t a", + "float64x1_t b", + "const int n" + ], + "return_type": { + "value": 
"float64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p16", + "arguments": [ + "poly16x4_t a", + "poly16x4_t b", + "const int n" + ], + "return_type": { + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p64", + "arguments": [ + "poly64x1_t a", + "poly64x1_t b", + "const int n" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b", + "const int n" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 3 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b", + 
"const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 1 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s64", + "arguments": [ + "int64x1_t a", + "int64x1_t b", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 0 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b", + "const int n" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "EXT" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vext_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b", + "const int n" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + }, + "n": { "minimum": 0, "maximum": 3 } @@ -30097,7 +233810,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -30680,7 +234393,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -30846,8 +234559,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -30875,8 +234592,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - 
"a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -30908,8 +234629,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -30941,8 +234666,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -30974,8 +234703,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31006,8 +234739,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31034,8 +234771,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31063,8 +234804,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31096,8 +234841,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31129,8 +234878,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31162,8 +234915,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ 
-31194,8 +234951,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31222,8 +234983,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31251,8 +235016,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31284,8 +235053,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31317,8 +235090,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31350,8 +235127,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31382,8 +235163,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31410,8 +235195,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31439,8 +235228,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31472,8 +235265,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + 
"register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31505,8 +235302,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31538,8 +235339,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31570,8 +235375,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31911,7 +235720,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -32492,7 +236301,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -34868,230 +238677,6 @@ ] ] }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x1_t src", - "const int lane" - ], - "return_type": { - "value": "uint64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x1_t src", - "const int lane" - ], - "return_type": { - "value": "int64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x2_t src", - "const int lane" - ], - "return_type": { - 
"value": "uint64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x2_t src", - "const int lane" - ], - "return_type": { - "value": "int64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x1_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x2_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_f64", - "arguments": [ - "float64_t const * ptr", - "float64x2_t src", - "const int lane" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, { "SIMD_ISA": "Neon", "name": 
"vld1_dup_f16", @@ -39947,7 +243532,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -39981,7 +243569,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40015,7 +243606,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40047,7 +243641,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40081,7 +243678,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40113,7 +243713,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40147,7 +243750,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40181,7 +243787,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40215,7 +243824,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40247,7 +243859,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40281,7 +243896,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40315,7 +243933,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40349,7 +243970,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + 
"src.val[1]": { "register": "Vt2.1D" } }, @@ -40381,7 +244005,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -41104,7 +244731,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41138,7 +244768,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41172,7 +244805,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41204,7 +244840,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41238,7 +244877,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41270,7 +244912,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41302,7 +244947,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41336,7 +244984,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41370,7 +245021,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41402,7 +245056,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41434,7 +245091,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41468,7 +245128,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + 
"register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41502,7 +245165,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41534,7 +245200,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -42255,7 +245924,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42289,7 +245964,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42323,7 +246004,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42355,7 +246042,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42389,7 +246082,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42421,7 +246120,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42455,7 +246160,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42489,7 +246200,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ 
-42523,7 +246240,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42555,7 +246278,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42589,7 +246318,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42623,7 +246358,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42657,7 +246398,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42689,7 +246436,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -43412,7 +247165,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43446,7 +247205,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43480,7 +247245,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43512,7 +247283,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": 
"Vt3.8H" } }, @@ -43546,7 +247323,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43578,7 +247361,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43610,7 +247399,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43644,7 +247439,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43678,7 +247479,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43710,7 +247517,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43742,7 +247555,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43776,7 +247595,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43810,7 +247635,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43842,7 +247673,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + 
"src.val[2]": { "register": "Vt3.16B" } }, @@ -44563,7 +248400,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44597,7 +248443,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44631,7 +248486,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44663,7 +248527,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44697,7 +248570,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44729,7 +248611,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44763,7 +248654,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44797,7 +248697,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { 
"register": "Vt4.2S" } }, @@ -44831,7 +248740,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44863,7 +248781,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44897,7 +248824,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44931,7 +248867,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44965,7 +248910,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44997,7 +248951,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -45023,842 +248986,12 @@ }, "Arguments_Preparation": { "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_p64", - "arguments": [ - "poly64_t const * ptr" - ], - "return_type": { - "value": "poly64x1x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "LD1" - ] - ] - }, 
- { - "SIMD_ISA": "Neon", - "name": "vld4_p8", - "arguments": [ - "poly8_t const * ptr" - ], - "return_type": { - "value": "poly8x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_s16", - "arguments": [ - "int16_t const * ptr" - ], - "return_type": { - "value": "int16x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_s32", - "arguments": [ - "int32_t const * ptr" - ], - "return_type": { - "value": "int32x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_s64", - "arguments": [ - "int64_t const * ptr" - ], - "return_type": { - "value": "int64x1x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_s8", - "arguments": [ - "int8_t const * ptr" - ], - "return_type": { - "value": "int8x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_u16", - "arguments": [ - "uint16_t const * ptr" - ], - "return_type": { - "value": "uint16x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_u32", - "arguments": [ - "uint32_t const * ptr" - ], - "return_type": { - "value": "uint32x2x4_t" - }, - 
"Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_u64", - "arguments": [ - "uint64_t const * ptr" - ], - "return_type": { - "value": "uint64x1x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4_u8", - "arguments": [ - "uint8_t const * ptr" - ], - "return_type": { - "value": "uint8x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_f16", - "arguments": [ - "float16_t const * ptr" - ], - "return_type": { - "value": "float16x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_f32", - "arguments": [ - "float32_t const * ptr" - ], - "return_type": { - "value": "float32x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_f64", - "arguments": [ - "float64_t const * ptr" - ], - "return_type": { - "value": "float64x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_p16", - "arguments": [ - "poly16_t const * ptr" - ], - "return_type": { - "value": "poly16x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ 
- [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_p64", - "arguments": [ - "poly64_t const * ptr" - ], - "return_type": { - "value": "poly64x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_p8", - "arguments": [ - "poly8_t const * ptr" - ], - "return_type": { - "value": "poly8x16x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_s16", - "arguments": [ - "int16_t const * ptr" - ], - "return_type": { - "value": "int16x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_s32", - "arguments": [ - "int32_t const * ptr" - ], - "return_type": { - "value": "int32x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_s64", - "arguments": [ - "int64_t const * ptr" - ], - "return_type": { - "value": "int64x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_s8", - "arguments": [ - "int8_t const * ptr" - ], - "return_type": { - "value": "int8x16x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_u16", - "arguments": [ - "uint16_t const * ptr" - ], - "return_type": { - "value": 
"uint16x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_u32", - "arguments": [ - "uint32_t const * ptr" - ], - "return_type": { - "value": "uint32x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_u64", - "arguments": [ - "uint64_t const * ptr" - ], - "return_type": { - "value": "uint64x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_dup_u8", - "arguments": [ - "uint8_t const * ptr" - ], - "return_type": { - "value": "uint8x16x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4R" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_f16", - "arguments": [ - "float16_t const * ptr" - ], - "return_type": { - "value": "float16x8x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_f32", - "arguments": [ - "float32_t const * ptr" - ], - "return_type": { - "value": "float32x4x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_f64", - "arguments": [ - "float64_t const * ptr" - ], - "return_type": { - "value": "float64x2x4_t" - }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" - } - }, - "Architectures": [ - "A64" - ], - 
"instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_lane_f16", - "arguments": [ - "float16_t const * ptr", - "float16x8x4_t src", - "const int lane" - ], - "return_type": { - "value": "float16x8x4_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt4.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_lane_f32", - "arguments": [ - "float32_t const * ptr", - "float32x4x4_t src", - "const int lane" - ], - "return_type": { - "value": "float32x4x4_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt4.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_lane_f64", - "arguments": [ - "float64_t const * ptr", - "float64x2x4_t src", - "const int lane" - ], - "return_type": { - "value": "float64x2x4_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt4.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_lane_p16", - "arguments": [ - "poly16_t const * ptr", - "poly16x8x4_t src", - "const int lane" - ], - "return_type": { - "value": "poly16x8x4_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt4.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "LD4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vld4q_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x2x4_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x2x4_t" 
- }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt4.2D" + "register": "Xn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -45869,57 +249002,40 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_p8", + "name": "vld4_p64", "arguments": [ - "poly8_t const * ptr", - "poly8x16x4_t src", - "const int lane" + "poly64_t const * ptr" ], "return_type": { - "value": "poly8x16x4_t" + "value": "poly64x1x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.16B" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "LD4" + "LD1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_s16", + "name": "vld4_p8", "arguments": [ - "int16_t const * ptr", - "int16x8x4_t src", - "const int lane" + "poly8_t const * ptr" ], "return_type": { - "value": "int16x8x4_t" + "value": "poly8x8x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.8H" } }, "Architectures": [ @@ -45935,25 +249051,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_s32", + "name": "vld4_s16", "arguments": [ - "int32_t const * ptr", - "int32x4x4_t src", - "const int lane" + "int16_t const * ptr" ], "return_type": { - "value": "int32x4x4_t" + "value": "int16x4x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.4S" } }, "Architectures": [ @@ -45969,28 +249076,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_s64", + "name": "vld4_s32", "arguments": [ - "int64_t const * ptr", - "int64x2x4_t src", - "const int lane" + "int32_t const * ptr" ], "return_type": { - "value": "int64x2x4_t" + "value": "int32x2x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" - }, - "src": { - 
"register": "Vt4.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -46001,57 +249101,41 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_s8", + "name": "vld4_s64", "arguments": [ - "int8_t const * ptr", - "int8x16x4_t src", - "const int lane" + "int64_t const * ptr" ], "return_type": { - "value": "int8x16x4_t" + "value": "int64x1x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LD4" + "LD1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_u16", + "name": "vld4_s8", "arguments": [ - "uint16_t const * ptr", - "uint16x8x4_t src", - "const int lane" + "int8_t const * ptr" ], "return_type": { - "value": "uint16x8x4_t" + "value": "int8x8x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.8H" } }, "Architectures": [ @@ -46067,25 +249151,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_u32", + "name": "vld4_u16", "arguments": [ - "uint32_t const * ptr", - "uint32x4x4_t src", - "const int lane" + "uint16_t const * ptr" ], "return_type": { - "value": "uint32x4x4_t" + "value": "uint16x4x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.4S" } }, "Architectures": [ @@ -46101,28 +249176,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_lane_u64", + "name": "vld4_u32", "arguments": [ - "uint64_t const * ptr", - "uint64x2x4_t src", - "const int lane" + "uint32_t const * ptr" ], "return_type": { - "value": "uint64x2x4_t" + "value": "uint32x2x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -46133,44 +249201,37 @@ }, { "SIMD_ISA": 
"Neon", - "name": "vld4q_lane_u8", + "name": "vld4_u64", "arguments": [ - "uint8_t const * ptr", - "uint8x16x4_t src", - "const int lane" + "uint64_t const * ptr" ], "return_type": { - "value": "uint8x16x4_t" + "value": "uint64x1x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" - }, - "src": { - "register": "Vt4.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LD4" + "LD1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_p16", + "name": "vld4_u8", "arguments": [ - "poly16_t const * ptr" + "uint8_t const * ptr" ], "return_type": { - "value": "poly16x8x4_t" + "value": "uint8x8x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46190,12 +249251,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vld4q_p64", + "name": "vld4q_dup_f16", "arguments": [ - "poly64_t const * ptr" + "float16_t const * ptr" ], "return_type": { - "value": "poly64x2x4_t" + "value": "float16x8x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46203,22 +249264,24 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_p8", + "name": "vld4q_dup_f32", "arguments": [ - "poly8_t const * ptr" + "float32_t const * ptr" ], "return_type": { - "value": "poly8x16x4_t" + "value": "float32x4x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46232,18 +249295,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_s16", + "name": "vld4q_dup_f64", "arguments": [ - "int16_t const * ptr" + "float64_t const * ptr" ], "return_type": { - "value": "int16x8x4_t" + "value": "float64x2x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46251,24 +249314,22 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_s32", + "name": "vld4q_dup_p16", "arguments": [ - "int32_t const * ptr" + "poly16_t const * ptr" ], "return_type": { - "value": "int32x4x4_t" + "value": 
"poly16x8x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46282,18 +249343,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_s64", + "name": "vld4q_dup_p64", "arguments": [ - "int64_t const * ptr" + "poly64_t const * ptr" ], "return_type": { - "value": "int64x2x4_t" + "value": "poly64x2x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46305,18 +249366,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_s8", + "name": "vld4q_dup_p8", "arguments": [ - "int8_t const * ptr" + "poly8_t const * ptr" ], "return_type": { - "value": "int8x16x4_t" + "value": "poly8x16x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46330,18 +249391,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_u16", + "name": "vld4q_dup_s16", "arguments": [ - "uint16_t const * ptr" + "int16_t const * ptr" ], "return_type": { - "value": "uint16x8x4_t" + "value": "int16x8x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46355,18 +249416,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_u32", + "name": "vld4q_dup_s32", "arguments": [ - "uint32_t const * ptr" + "int32_t const * ptr" ], "return_type": { - "value": "uint32x4x4_t" + "value": "int32x4x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46380,18 +249441,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_u64", + "name": "vld4q_dup_s64", "arguments": [ - "uint64_t const * ptr" + "int64_t const * ptr" ], "return_type": { - "value": "uint64x2x4_t" + "value": "int64x2x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46403,18 +249464,18 @@ ], "instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vld4q_u8", + "name": "vld4q_dup_s8", "arguments": [ - "uint8_t const * ptr" + "int8_t const * ptr" ], "return_type": { - "value": "uint8x16x4_t" + "value": "int8x16x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46428,18 +249489,18 @@ ], 
"instructions": [ [ - "LD4" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vldrq_p128", + "name": "vld4q_dup_u16", "arguments": [ - "poly128_t const * ptr" + "uint16_t const * ptr" ], "return_type": { - "value": "poly128_t" + "value": "uint16x8x4_t" }, "Arguments_Preparation": { "ptr": { @@ -46447,115 +249508,101 @@ } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "LDR" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_f16", + "name": "vld4q_dup_u32", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint32_t const * ptr" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_f32", + "name": "vld4q_dup_u64", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint64_t const * ptr" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_f64", + "name": "vld4q_dup_u8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "uint8_t const * ptr" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4R" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_s16", + "name": "vld4q_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float16_t const * ptr" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - 
}, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -46565,26 +249612,22 @@ ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_s32", + "name": "vld4q_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float32_t const * ptr" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -46594,55 +249637,63 @@ ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_s8", + "name": "vld4q_f64", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float64_t const * ptr" ], "return_type": { - "value": "int8x8_t" + "value": "float64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_u16", + "name": "vld4q_lane_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float16_t const * ptr", + "float16x8x4_t src", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -46652,26 +249703,40 @@ ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_u32", + "name": "vld4q_lane_f32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "float32_t const * ptr", + "float32x4x4_t src", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x4x4_t" 
}, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -46681,295 +249746,460 @@ ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmax_u8", + "name": "vld4q_lane_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float64_t const * ptr", + "float64x2x4_t src", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxh_f16", + "name": "vld4q_lane_p16", "arguments": [ - "float16_t a", - "float16_t b" + "poly16_t const * ptr", + "poly16x8x4_t src", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "poly16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Hm" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnm_f16", + "name": "vld4q_lane_p64", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly64_t const * 
ptr", + "poly64x2x4_t src", + "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "poly64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnm_f32", + "name": "vld4q_lane_p8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "poly8_t const * ptr", + "poly8x16x4_t src", + "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnm_f64", + "name": "vld4q_lane_s16", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16_t const * ptr", + "int16x8x4_t src", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "int16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmaxnmh_f16", + "name": "vld4q_lane_s32", "arguments": [ - "float16_t a", - "float16_t b" + "int32_t const * ptr", + "int32x4x4_t src", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "int32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Hm" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmq_f16", + "name": "vld4q_lane_s64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int64_t const * ptr", + "int64x2x4_t src", + "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "int64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmq_f32", + "name": "vld4q_lane_s8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int8_t const * ptr", + "int8x16x4_t src", + "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "int8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { + "register": "Vt4.16B" } }, 
"Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmq_f64", + "name": "vld4q_lane_u16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint16_t const * ptr", + "uint16x8x4_t src", + "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2D" + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNM" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmv_f16", + "name": "vld4q_lane_u32", "arguments": [ - "float16x4_t a" + "uint32_t const * ptr", + "uint32x4x4_t src", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "uint32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmv_f32", + "name": "vld4q_lane_u64", "arguments": [ - "float32x2_t a" + "uint64_t const * ptr", + "uint64x2x4_t src", + "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": 
{ + "register": "Vt4.2D" } }, "Architectures": [ @@ -46977,22 +250207,40 @@ ], "instructions": [ [ - "FMAXNMP" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmvq_f16", + "name": "vld4q_lane_u8", "arguments": [ - "float16x8_t a" + "uint8_t const * ptr", + "uint8x16x4_t src", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "uint8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 15 + }, + "ptr": { + "register": "Xn" + }, + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -47000,45 +250248,47 @@ ], "instructions": [ [ - "FMAXNMP" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmvq_f32", + "name": "vld4q_p16", "arguments": [ - "float32x4_t a" + "poly16_t const * ptr" ], "return_type": { - "value": "float32_t" + "value": "poly16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMV" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxnmvq_f64", + "name": "vld4q_p64", "arguments": [ - "float64x2_t a" + "poly64_t const * ptr" ], "return_type": { - "value": "float64_t" + "value": "poly64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47046,54 +250296,47 @@ ], "instructions": [ [ - "FMAXNMP" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_f16", + "name": "vld4q_p8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly8_t const * ptr" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_f32", + "name": "vld4q_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16_t const * ptr" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47103,82 +250346,70 @@ ], "instructions": [ [ - "FMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_f64", + "name": "vld4q_s32", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int32_t const * ptr" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_s16", + "name": "vld4q_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int64_t const * ptr" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_s32", + "name": "vld4q_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8_t const * ptr" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47188,26 +250419,22 @@ ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_s8", + "name": "vld4q_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint16_t const * ptr" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x8x4_t" }, "Arguments_Preparation": { - "a": { - 
"register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47217,26 +250444,22 @@ ], "instructions": [ [ - "SMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_u16", + "name": "vld4q_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint32_t const * ptr" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47246,55 +250469,45 @@ ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_u32", + "name": "vld4q_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint64_t const * ptr" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxq_u8", + "name": "vld4q_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint8_t const * ptr" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" } }, "Architectures": [ @@ -47304,22 +250517,31 @@ ], "instructions": [ [ - "UMAX" + "LD4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_f16", + "name": "vldap1_lane_f64", "arguments": [ - "float16x4_t a" + "float64_t const * ptr", + "float64x1_t src", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47327,22 +250549,31 @@ ], "instructions": [ [ - "FMAXP" + 
"LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_f32", + "name": "vldap1_lane_p64", "arguments": [ - "float32x2_t a" + "poly64_t const * ptr", + "poly64x1_t src", + "const int lane" ], "return_type": { - "value": "float32_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47350,22 +250581,31 @@ ], "instructions": [ [ - "FMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s16", + "name": "vldap1_lane_s64", "arguments": [ - "int16x4_t a" + "int64_t const * ptr", + "int64x1_t src", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47373,22 +250613,31 @@ ], "instructions": [ [ - "SMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s32", + "name": "vldap1_lane_u64", "arguments": [ - "int32x2_t a" + "uint64_t const * ptr", + "uint64x1_t src", + "const int lane" ], "return_type": { - "value": "int32_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" } }, "Architectures": [ @@ -47396,22 +250645,31 @@ ], "instructions": [ [ - "SMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_s8", + "name": "vldap1q_lane_f64", "arguments": [ - "int8x8_t a" + "float64_t const * ptr", + "float64x2_t src", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ 
-47419,22 +250677,31 @@ ], "instructions": [ [ - "SMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u16", + "name": "vldap1q_lane_p64", "arguments": [ - "uint16x4_t a" + "poly64_t const * ptr", + "poly64x2_t src", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47442,22 +250709,31 @@ ], "instructions": [ [ - "UMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u32", + "name": "vldap1q_lane_s64", "arguments": [ - "uint32x2_t a" + "int64_t const * ptr", + "int64x2_t src", + "const int lane" ], "return_type": { - "value": "uint32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47465,22 +250741,31 @@ ], "instructions": [ [ - "UMAXP" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxv_u8", + "name": "vldap1q_lane_u64", "arguments": [ - "uint8x8_t a" + "uint64_t const * ptr", + "uint64x2_t src", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" } }, "Architectures": [ @@ -47488,45 +250773,55 @@ ], "instructions": [ [ - "UMAXV" + "LDAP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_f16", + "name": "vldrq_p128", "arguments": [ - "float16x8_t a" + "poly128_t const * ptr" ], "return_type": { - "value": "float16_t" + "value": "poly128_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "LDR" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vmaxvq_f32", + "name": "vluti2_lane_f16", "arguments": [ - "float32x4_t a" + "float16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47534,22 +250829,31 @@ ], "instructions": [ [ - "FMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_f64", + "name": "vluti2_lane_p16", "arguments": [ - "float64x2_t a" + "poly16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47557,22 +250861,31 @@ ], "instructions": [ [ - "FMAXP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s16", + "name": "vluti2_lane_p8", "arguments": [ - "int16x8_t a" + "poly8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int16_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47580,22 +250893,31 @@ ], "instructions": [ [ - "SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s32", + "name": "vluti2_lane_s16", "arguments": [ - "int32x4_t a" + "int16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int32_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47603,21 +250925,30 @@ ], "instructions": [ [ - 
"SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_s8", + "name": "vluti2_lane_s8", "arguments": [ - "int8x16_t a" + "int8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { "register": "Vn.16B" } }, @@ -47626,21 +250957,30 @@ ], "instructions": [ [ - "SMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u16", + "name": "vluti2_lane_u16", "arguments": [ - "uint16x8_t a" + "uint16x4_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { "register": "Vn.8H" } }, @@ -47649,22 +250989,31 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u32", + "name": "vluti2_lane_u8", "arguments": [ - "uint32x4_t a" + "uint8x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint32_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -47672,22 +251021,31 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmaxvq_u8", + "name": "vluti2_laneq_f16", "arguments": [ - "uint8x16_t a" + "float16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47695,83 +251053,95 @@ ], "instructions": [ [ - "UMAXV" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmin_f16", + "name": "vluti2_laneq_p16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16x4_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_f32", + "name": "vluti2_laneq_p8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "poly8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_f64", + "name": "vluti2_laneq_s16", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Dm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47779,200 +251149,223 @@ ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s16", + "name": "vluti2_laneq_s8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": 
"Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s32", + "name": "vluti2_laneq_u16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint16x4_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_s8", + "name": "vluti2_laneq_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8B" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u16", + "name": "vluti2q_lane_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u32", + "name": "vluti2q_lane_p16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "poly16x8_t vn", + "uint8x8_t vm", + "const int index" ], 
"return_type": { - "value": "uint32x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmin_u8", + "name": "vluti2q_lane_p8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "uint8x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8B" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminh_f16", + "name": "vluti2q_lane_s16", "arguments": [ - "float16_t a", - "float16_t b" + "int16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Hm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -47980,82 +251373,95 @@ ], "instructions": [ [ - "FMIN" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnm_f16", + "name": "vluti2q_lane_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vminnm_f32", + "name": "vluti2q_lane_u16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint16x8_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnm_f64", + "name": "vluti2q_lane_u8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Dm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48063,110 +251469,127 @@ ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmh_f16", + "name": "vluti2q_laneq_f16", "arguments": [ - "float16_t a", - "float16_t b" + "float16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Hm" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f16", + "name": "vluti2q_laneq_p16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + 
"maximum": 7 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f32", + "name": "vluti2q_laneq_p8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "poly8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float32x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmq_f64", + "name": "vluti2q_laneq_s16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int16x8_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2D" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -48174,22 +251597,31 @@ ], "instructions": [ [ - "FMINNM" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmv_f16", + "name": "vluti2q_laneq_s8", "arguments": [ - "float16x4_t a" + "int8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48197,22 +251629,31 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmv_f32", + "name": "vluti2q_laneq_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t vn", + "uint8x16_t vm", + "const int index" 
], "return_type": { - "value": "float32_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" } }, "Architectures": [ @@ -48220,22 +251661,31 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f16", + "name": "vluti2q_laneq_u8", "arguments": [ - "float16x8_t a" + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48243,22 +251693,34 @@ ], "instructions": [ [ - "FMINNMP" + "LUTI2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f32", + "name": "vluti4q_lane_f16_x2", "arguments": [ - "float32x4_t a" + "float16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ -48266,22 +251728,34 @@ ], "instructions": [ [ - "FMINNMV" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminnmvq_f64", + "name": "vluti4q_lane_p16_x2", "arguments": [ - "float64x2_t a" + "poly16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ -48289,83 +251763,98 @@ ], "instructions": [ [ - "FMINNMP" + 
"LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f16", + "name": "vluti4q_lane_p8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f32", + "name": "vluti4q_lane_s16_x2", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_f64", + "name": "vluti4q_lane_s8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "index": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.2D" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48373,196 +251862,235 @@ ], "instructions": [ [ - "FMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s16", + "name": "vluti4q_lane_u16_x2", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint16x8x2_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s32", + "name": "vluti4q_lane_u8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_s8", + "name": "vluti4q_laneq_f16_x2", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.16B" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u16", + "name": "vluti4q_laneq_p16_x2", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint16x8_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": 
{ + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u32", + "name": "vluti4q_laneq_p8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "poly8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint32x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "index": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4S" + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminq_u8", + "name": "vluti4q_laneq_s16_x2", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "index": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.16B" + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMIN" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_f16", + "name": "vluti4q_laneq_s8", "arguments": [ - "float16x4_t a" + "int8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "float16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48570,22 +252098,34 @@ ], "instructions": [ [ - "FMINP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_f32", + "name": "vluti4q_laneq_u16_x2", "arguments": [ - "float32x2_t a" + "uint16x8x2_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": 
"float32_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" } }, "Architectures": [ @@ -48593,22 +252133,31 @@ ], "instructions": [ [ - "FMINP" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s16", + "name": "vluti4q_laneq_u8", "arguments": [ - "int16x4_t a" + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" ], "return_type": { - "value": "int16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" } }, "Architectures": [ @@ -48616,68 +252165,83 @@ ], "instructions": [ [ - "SMINV" + "LUTI4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s32", + "name": "vmax_f16", "arguments": [ - "int32x2_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMINP" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_s8", + "name": "vmax_f32", "arguments": [ - "int8x8_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u16", + "name": "vmax_f64", "arguments": [ - "uint16x4_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" + }, + "b": { + "register": 
"Dm" } }, "Architectures": [ @@ -48685,160 +252249,200 @@ ], "instructions": [ [ - "UMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u32", + "name": "vmax_s16", "arguments": [ - "uint32x2_t a" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint32_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminv_u8", + "name": "vmax_s32", "arguments": [ - "uint8x8_t a" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINV" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f16", + "name": "vmax_s8", "arguments": [ - "float16x8_t a" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f32", + "name": "vmax_u16", "arguments": [ - "float32x4_t a" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float32_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINV" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_f64", + "name": "vmax_u32", "arguments": [ - "float64x2_t a" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.2D" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s16", + "name": "vmax_u8", "arguments": [ - "int16x8_t a" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINV" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s32", + "name": "vmaxh_f16", "arguments": [ - "int32x4_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "int32_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ @@ -48846,68 +252450,82 @@ ], "instructions": [ [ - "SMINV" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_s8", + "name": "vmaxnm_f16", "arguments": [ - "int8x16_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u16", + "name": "vmaxnm_f32", "arguments": [ - "uint16x8_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u32", + "name": "vmaxnm_f64", "arguments": [ - "uint32x4_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint32_t" + 
"value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ @@ -48915,408 +252533,309 @@ ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vminvq_u8", + "name": "vmaxnmh_f16", "arguments": [ - "uint8x16_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMINV" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_f32", + "name": "vmaxnmq_f16", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x2_t c" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.8H" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_f64", + "name": "vmaxnmq_f32", "arguments": [ - "float64x1_t a", - "float64x1_t b", - "float64x1_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_f32", + "name": "vmaxnmq_f64", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x2_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - 
"minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.2D" }, - "v": {} + "b": { + "register": "Vm.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_s16", + "name": "vmaxnmv_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_s32", + "name": "vmaxnmv_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_u16", + "name": "vmaxnmvq_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t v", - "const int lane" + "float16x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_lane_u32", + "name": "vmaxnmvq_f32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t v", - "const int lane" + "float32x4_t a" ], "return_type": { - "value": 
"uint32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXNMV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_f32", + "name": "vmaxnmvq_f64", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32x4_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": {} + "a": { + "register": "Vn.2D" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_s16", + "name": "vmaxq_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { "register": "Vm.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_s32", + "name": "vmaxq_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_u16", + "name": "vmaxq_f64", "arguments": [ 
- "uint16x4_t a", - "uint16x4_t b", - "uint16x8_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2D" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -49324,66 +252843,55 @@ ], "instructions": [ [ - "MLA" + "FMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_laneq_u32", + "name": "vmaxq_s16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_f32", + "name": "vmaxq_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b", - "float32_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ @@ -49393,30 +252901,26 @@ ], "instructions": [ [ - "RESULT[I]" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_s16", + "name": "vmaxq_s8", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16_t c" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.16B" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.16B" } }, 
"Architectures": [ @@ -49426,30 +252930,26 @@ ], "instructions": [ [ - "MLA" + "SMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_s32", + "name": "vmaxq_u16", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32_t c" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.8H" } }, "Architectures": [ @@ -49459,30 +252959,26 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_u16", + "name": "vmaxq_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16_t c" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ @@ -49492,30 +252988,26 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_n_u32", + "name": "vmaxq_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32_t c" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.16B" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.16B" } }, "Architectures": [ @@ -49525,233 +253017,160 @@ ], "instructions": [ [ - "MLA" + "UMAX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_s16", + "name": "vmaxv_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" } }, "Architectures": [ - "v7", - 
"A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_s32", + "name": "vmaxv_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_s8", + "name": "vmaxv_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t c" + "int16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u16", + "name": "vmaxv_s32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "int32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u32", + "name": "vmaxv_s8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "int8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmla_u8", + "name": "vmaxv_u16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - 
"uint8x8_t c" + "uint16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_s16", + "name": "vmaxv_u32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "uint32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -49759,35 +253178,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_s32", + "name": "vmaxv_u8", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "uint8x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.8B" } }, "Architectures": [ @@ -49795,35 +253201,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_lane_u16", + "name": "vmaxvq_f16", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x4_t v", - "const int lane" + "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ @@ -49831,35 +253224,22 @@ ], "instructions": [ [ - "UMLAL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmlal_high_lane_u32", + "name": "vmaxvq_f32", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x2_t v", - "const int lane" + "float32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -49867,35 +253247,22 @@ ], "instructions": [ [ - "UMLAL2" + "FMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_s16", + "name": "vmaxvq_f64", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2D" } }, "Architectures": [ @@ -49903,35 +253270,22 @@ ], "instructions": [ [ - "SMLAL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_s32", + "name": "vmaxvq_s16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -49939,35 +253293,22 @@ ], "instructions": [ [ - "SMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_u16", + "name": "vmaxvq_s32", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - 
"v": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ @@ -49975,35 +253316,22 @@ ], "instructions": [ [ - "UMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_laneq_u32", + "name": "vmaxvq_s8", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t v", - "const int lane" + "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ @@ -50011,30 +253339,22 @@ ], "instructions": [ [ - "UMLAL2" + "SMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_s16", + "name": "vmaxvq_u16", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16_t c" + "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" } }, "Architectures": [ @@ -50042,30 +253362,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_s32", + "name": "vmaxvq_u32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" } }, "Architectures": [ @@ -50073,30 +253385,22 @@ ], "instructions": [ [ - "SMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_u16", + "name": "vmaxvq_u8", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16_t c" + "uint8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.16B" } }, 
"Architectures": [ @@ -50104,92 +253408,83 @@ ], "instructions": [ [ - "UMLAL2" + "UMAXV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_n_u32", + "name": "vmin_f16", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32_t c" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s16", + "name": "vmin_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s32", + "name": "vmin_f64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Dm" } }, "Architectures": [ @@ -50197,159 +253492,142 @@ ], "instructions": [ [ - "SMLAL2" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_s8", + "name": "vmin_s16", "arguments": [ - "int16x8_t a", - "int8x16_t b", - "int8x16_t c" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "c": { - 
"register": "Vm.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u16", + "name": "vmin_s32", "arguments": [ - "uint32x4_t a", - "uint16x8_t b", - "uint16x8_t c" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u32", + "name": "vmin_s8", "arguments": [ - "uint64x2_t a", - "uint32x4_t b", - "uint32x4_t c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_high_u8", + "name": "vmin_u16", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "uint8x16_t c" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMLAL2" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_s16", + "name": "vmin_u32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": 
"Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -50359,35 +253637,26 @@ ], "instructions": [ [ - "SMLAL" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_s32", + "name": "vmin_u8", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -50397,147 +253666,109 @@ ], "instructions": [ [ - "SMLAL" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_u16", + "name": "vminh_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t v", - "const int lane" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Hn" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_lane_u32", + "name": "vminnm_f16", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x2_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vmlal_laneq_s16", + "name": "vminnm_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_s32", + "name": "vminnm_f64", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Dm" } }, "Architectures": [ @@ -50545,366 +253776,280 @@ ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_u16", + "name": "vminnmh_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x8_t v", - "const int lane" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Hn" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_laneq_u32", + "name": "vminnmq_f16", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x4_t v", - "const int lane" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_s16", + "name": "vminnmq_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_s32", + "name": "vminnmq_f64", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_u16", + "name": "vminnmv_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16_t c" + "float16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_n_u32", + "name": "vminnmv_f32", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32_t c" + "float32x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s16", + "name": "vminnmvq_f16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s32", + "name": "vminnmvq_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "float32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_s8", + "name": "vminnmvq_f64", "arguments": [ - "int16x8_t a", - "int8x8_t b", - "int8x8_t c" + "float64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMLAL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u16", + "name": "vminq_f16", "arguments": [ - "uint32x4_t a", - "uint16x4_t b", - "uint16x4_t c" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, 
- "c": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u32", + "name": "vminq_f32", "arguments": [ - "uint64x2_t a", - "uint32x2_t b", - "uint32x2_t c" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -50914,63 +254059,53 @@ ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlal_u8", + "name": "vminq_f64", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "uint8x8_t c" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.2D" }, "b": { - "register": "Vn.8B" - }, - "c": { - "register": "Vm.8B" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMLAL" + "FMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_f32", + "name": "vminq_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x4_t c" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.8H" }, "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.8H" } }, "Architectures": [ @@ -50980,61 +254115,56 @@ ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_f64", + "name": "vminq_s32", "arguments": [ - "float64x2_t a", - "float64x2_t b", - "float64x2_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" + "register": "Vn.4S" }, 
"b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_f32", + "name": "vminq_s8", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x2_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.16B" }, - "v": {} + "b": { + "register": "Vm.16B" + } }, "Architectures": [ "v7", @@ -51043,35 +254173,26 @@ ], "instructions": [ [ - "RESULT[I]" + "SMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_s16", + "name": "vminq_u16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -51081,35 +254202,26 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_s32", + "name": "vminq_u32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -51119,35 +254231,26 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_u16", + "name": "vminq_u8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x4_t v", 
- "const int lane" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.16B" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -51157,103 +254260,68 @@ ], "instructions": [ [ - "MLA" + "UMIN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_lane_u32", + "name": "vminv_f16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x2_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_f32", + "name": "vminv_f32", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32x4_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": {} + "a": { + "register": "Vn.2S" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_s16", + "name": "vminv_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "int16x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.4H" } }, "Architectures": [ @@ -51261,35 +254329,22 @@ ], "instructions": [ [ - "MLA" + "SMINV" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_s32", + "name": "vminv_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -51297,35 +254352,22 @@ ], "instructions": [ [ - "MLA" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_u16", + "name": "vminv_s8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t v", - "const int lane" + "int8x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ @@ -51333,35 +254375,22 @@ ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_laneq_u32", + "name": "vminv_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t v", - "const int lane" + "uint16x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.4H" } }, "Architectures": [ @@ -51369,376 +254398,266 @@ ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_f32", + "name": "vminv_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b", - "float32_t c" + "uint32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "N/A" - }, - "b": { - "register": "N/A" - }, - "c": { - "register": "N/A" + "register": "Vm.2S" 
} }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RESULT[I]" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_s16", + "name": "vminv_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16_t c" + "uint8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_s32", + "name": "vminvq_f16", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32_t c" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_u16", + "name": "vminvq_f32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16_t c" + "float32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_n_u32", + "name": "vminvq_f64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32_t c" + "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vmlaq_s16", + "name": "vminvq_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_s32", + "name": "vminvq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_s8", + "name": "vminvq_s8", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" + "int8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "SMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u16", + "name": "vminvq_u16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" + "uint16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u32", + "name": "vminvq_u32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "uint32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vd.4S" - }, - "b": { "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlaq_u8", + "name": "vminvq_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" + "uint8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MLA" + "UMINV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_f32", + "name": "vmla_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51765,13 +254684,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_f64", + "name": "vmla_f64", "arguments": [ "float64x1_t a", "float64x1_t b", @@ -51796,13 +254715,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_f32", + "name": "vmla_lane_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51813,13 +254732,10 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -51828,13 +254744,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_s16", + "name": "vmla_lane_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -51866,13 +254782,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_s32", + "name": "vmla_lane_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -51904,13 +254820,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_lane_u16", + "name": "vmla_lane_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -51942,13 +254858,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, 
{ "SIMD_ISA": "Neon", - "name": "vmls_lane_u32", + "name": "vmla_lane_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -51980,13 +254896,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_f32", + "name": "vmla_laneq_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -51997,26 +254913,23 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_s16", + "name": "vmla_laneq_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52046,13 +254959,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_s32", + "name": "vmla_laneq_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52082,13 +254995,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_u16", + "name": "vmla_laneq_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -52118,13 +255031,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_laneq_u32", + "name": "vmla_laneq_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52154,13 +255067,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_f32", + "name": "vmla_n_f32", "arguments": [ "float32x2_t a", "float32x2_t b", @@ -52187,13 +255100,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_s16", + "name": "vmla_n_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52220,13 +255133,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_s32", + "name": "vmla_n_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52253,13 +255166,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_u16", + "name": "vmla_n_u16", 
"arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -52286,13 +255199,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_n_u32", + "name": "vmla_n_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52319,13 +255232,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s16", + "name": "vmla_s16", "arguments": [ "int16x4_t a", "int16x4_t b", @@ -52352,13 +255265,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s32", + "name": "vmla_s32", "arguments": [ "int32x2_t a", "int32x2_t b", @@ -52385,13 +255298,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_s8", + "name": "vmla_s8", "arguments": [ "int8x8_t a", "int8x8_t b", @@ -52418,13 +255331,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u16", + "name": "vmla_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b", @@ -52451,13 +255364,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u32", + "name": "vmla_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b", @@ -52484,13 +255397,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmls_u8", + "name": "vmla_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b", @@ -52517,13 +255430,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_s16", + "name": "vmlal_high_lane_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52553,13 +255466,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_s32", + "name": "vmlal_high_lane_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52589,13 +255502,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_u16", + "name": "vmlal_high_lane_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52625,13 +255538,13 @@ 
], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_lane_u32", + "name": "vmlal_high_lane_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52661,13 +255574,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_s16", + "name": "vmlal_high_laneq_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52697,13 +255610,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_s32", + "name": "vmlal_high_laneq_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52733,13 +255646,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_u16", + "name": "vmlal_high_laneq_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52769,13 +255682,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_laneq_u32", + "name": "vmlal_high_laneq_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52805,13 +255718,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_s16", + "name": "vmlal_high_n_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52836,13 +255749,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_s32", + "name": "vmlal_high_n_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52867,13 +255780,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_u16", + "name": "vmlal_high_n_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -52898,13 +255811,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_n_u32", + "name": "vmlal_high_n_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -52929,13 +255842,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmlsl_high_s16", + "name": "vmlal_high_s16", "arguments": [ "int32x4_t a", "int16x8_t b", @@ -52960,13 +255873,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_s32", + "name": "vmlal_high_s32", "arguments": [ "int64x2_t a", "int32x4_t b", @@ -52991,13 +255904,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_s8", + "name": "vmlal_high_s8", "arguments": [ "int16x8_t a", "int8x16_t b", @@ -53022,13 +255935,13 @@ ], "instructions": [ [ - "SMLSL2" + "SMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u16", + "name": "vmlal_high_u16", "arguments": [ "uint32x4_t a", "uint16x8_t b", @@ -53053,13 +255966,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u32", + "name": "vmlal_high_u32", "arguments": [ "uint64x2_t a", "uint32x4_t b", @@ -53084,13 +255997,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_high_u8", + "name": "vmlal_high_u8", "arguments": [ "uint16x8_t a", "uint8x16_t b", @@ -53115,13 +256028,13 @@ ], "instructions": [ [ - "UMLSL2" + "UMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_s16", + "name": "vmlal_lane_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53153,13 +256066,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_s32", + "name": "vmlal_lane_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53191,13 +256104,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_u16", + "name": "vmlal_lane_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53229,13 +256142,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_lane_u32", + "name": "vmlal_lane_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53267,13 +256180,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vmlsl_laneq_s16", + "name": "vmlal_laneq_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53303,13 +256216,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_s32", + "name": "vmlal_laneq_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53339,13 +256252,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_u16", + "name": "vmlal_laneq_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53375,13 +256288,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_laneq_u32", + "name": "vmlal_laneq_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53411,13 +256324,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_s16", + "name": "vmlal_n_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53444,13 +256357,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_s32", + "name": "vmlal_n_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53477,13 +256390,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_u16", + "name": "vmlal_n_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53510,13 +256423,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_n_u32", + "name": "vmlal_n_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53543,13 +256456,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_s16", + "name": "vmlal_s16", "arguments": [ "int32x4_t a", "int16x4_t b", @@ -53576,13 +256489,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_s32", + "name": "vmlal_s32", "arguments": [ "int64x2_t a", "int32x2_t b", @@ -53609,13 +256522,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmlsl_s8", + "name": "vmlal_s8", "arguments": [ "int16x8_t a", "int8x8_t b", @@ -53642,13 +256555,13 @@ ], "instructions": [ [ - "SMLSL" + "SMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u16", + "name": "vmlal_u16", "arguments": [ "uint32x4_t a", "uint16x4_t b", @@ -53675,13 +256588,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u32", + "name": "vmlal_u32", "arguments": [ "uint64x2_t a", "uint32x2_t b", @@ -53708,13 +256621,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsl_u8", + "name": "vmlal_u8", "arguments": [ "uint16x8_t a", "uint8x8_t b", @@ -53741,13 +256654,13 @@ ], "instructions": [ [ - "UMLSL" + "UMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_f32", + "name": "vmlaq_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -53774,13 +256687,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_f64", + "name": "vmlaq_f64", "arguments": [ "float64x2_t a", "float64x2_t b", @@ -53805,13 +256718,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_f32", + "name": "vmlaq_lane_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -53822,13 +256735,10 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -53837,13 +256747,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_s16", + "name": "vmlaq_lane_s16", "arguments": [ "int16x8_t a", "int16x8_t b", @@ -53875,13 +256785,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_s32", + "name": "vmlaq_lane_s32", "arguments": [ "int32x4_t a", "int32x4_t b", @@ -53913,13 +256823,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_u16", + "name": "vmlaq_lane_u16", 
"arguments": [ "uint16x8_t a", "uint16x8_t b", @@ -53951,13 +256861,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_lane_u32", + "name": "vmlaq_lane_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b", @@ -53989,13 +256899,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_f32", + "name": "vmlaq_laneq_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -54006,26 +256916,23 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_s16", + "name": "vmlaq_laneq_s16", "arguments": [ "int16x8_t a", "int16x8_t b", @@ -54055,13 +256962,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_s32", + "name": "vmlaq_laneq_s32", "arguments": [ "int32x4_t a", "int32x4_t b", @@ -54091,13 +256998,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_u16", + "name": "vmlaq_laneq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b", @@ -54127,13 +257034,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_laneq_u32", + "name": "vmlaq_laneq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b", @@ -54163,13 +257070,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_n_f32", + "name": "vmlaq_n_f32", "arguments": [ "float32x4_t a", "float32x4_t b", @@ -54196,13 +257103,13 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_n_s16", + "name": "vmlaq_n_s16", "arguments": [ "int16x8_t a", "int16x8_t b", @@ -54229,13 +257136,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_n_s32", + "name": "vmlaq_n_s32", "arguments": [ "int32x4_t a", "int32x4_t b", @@ 
-54262,13 +257169,13 @@ ], "instructions": [ [ - "MLS" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmlsq_n_u16", + "name": "vmlaq_n_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b", @@ -54285,500 +257192,7 @@ "register": "Vn.8H" }, "c": { - "register": "Vm.H[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_n_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32_t c" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s16", - "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s32", - "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_s8", - "arguments": [ - "int8x16_t a", - "int8x16_t b", - "int8x16_t c" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - 
}, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u16", - "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "uint16x8_t c" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmlsq_u8", - "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "uint8x16_t c" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" - }, - "c": { - "register": "Vm.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "MLS" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmmlaq_s32", - "arguments": [ - "int32x4_t r", - "int8x16_t a", - "int8x16_t b" - ], - "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "SMMLA" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmmlaq_u32", - "arguments": [ - "uint32x4_t r", - "uint8x16_t a", - "uint8x16_t b" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "A32", - "A64" - ], - 
"instructions": [ - [ - "UMMLA" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f16", - "arguments": [ - "float16_t value" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f32", - "arguments": [ - "float32_t value" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_f64", - "arguments": [ - "float64_t value" - ], - "return_type": { - "value": "float64x1_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_p16", - "arguments": [ - "poly16_t value" - ], - "return_type": { - "value": "poly16x4_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_p8", - "arguments": [ - "poly8_t value" - ], - "return_type": { - "value": "poly8x8_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_s16", - "arguments": [ - "int16_t value" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_s32", - "arguments": [ - "int32_t value" - ], - "return_type": { - "value": "int32x2_t" - }, - 
"Arguments_Preparation": { - "value": { - "register": "rn" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "DUP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmov_n_s64", - "arguments": [ - "int64_t value" - ], - "return_type": { - "value": "int64x1_t" - }, - "Arguments_Preparation": { - "value": { - "register": "rn" + "register": "Vm.H[0]" } }, "Architectures": [ @@ -54788,22 +257202,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_s8", + "name": "vmlaq_n_u32", "arguments": [ - "int8_t value" + "uint32x4_t a", + "uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -54813,22 +257235,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u16", + "name": "vmlaq_s16", "arguments": [ - "uint16_t value" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -54838,22 +257268,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u32", + "name": "vmlaq_s32", "arguments": [ - "uint32_t value" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -54863,22 +257301,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u64", + "name": "vmlaq_s8", 
"arguments": [ - "uint64_t value" + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "uint64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -54888,22 +257334,30 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmov_n_u8", + "name": "vmlaq_u16", "arguments": [ - "uint8_t value" + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -54913,91 +257367,129 @@ ], "instructions": [ [ - "DUP" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s16", + "name": "vmlaq_u32", "arguments": [ - "int16x8_t a" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s32", + "name": "vmlaq_u8", "arguments": [ - "int32x4_t a" + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "MLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_s8", + "name": "vmls_f32", "arguments": [ - "int8x16_t a" + "float32x2_t a", + "float32x2_t b", + 
"float32x2_t c" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "N/A" + }, + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u16", + "name": "vmls_f64", "arguments": [ - "uint16x8_t a" + "float64x1_t a", + "float64x1_t b", + "float64x1_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "N/A" + }, + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ -55005,68 +257497,102 @@ ], "instructions": [ [ - "USHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u32", + "name": "vmls_lane_f32", "arguments": [ - "uint32x4_t a" + "float32x2_t a", + "float32x2_t b", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHLL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_high_u8", + "name": "vmls_lane_s16", "arguments": [ - "uint8x16_t a" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHLL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s16", + "name": "vmls_lane_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], 
"return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55076,22 +257602,35 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s32", + "name": "vmls_lane_u16", "arguments": [ - "int32x2_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55101,22 +257640,35 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_s8", + "name": "vmls_lane_u32", "arguments": [ - "int8x8_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55126,159 +257678,201 @@ ], "instructions": [ [ - "SSHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u16", + "name": "vmls_laneq_f32", "arguments": [ - "uint16x4_t a" + "float32x2_t a", + "float32x2_t b", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u32", + "name": "vmls_laneq_s16", "arguments": [ - 
"uint32x2_t a" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovl_u8", + "name": "vmls_laneq_s32", "arguments": [ - "uint8x8_t a" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHLL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s16", + "name": "vmls_laneq_u16", "arguments": [ - "int8x8_t r", - "int16x8_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s32", + "name": "vmls_laneq_u32", "arguments": [ - "int16x4_t r", - "int32x4_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.2S" + }, + "lane": 
{ + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_s64", + "name": "vmls_n_f32", "arguments": [ - "int32x2_t r", - "int64x2_t a" + "float32x2_t a", + "float32x2_t b", + "float32_t c" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "N/A" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ -55288,26 +257882,30 @@ ], "instructions": [ [ - "XTN2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u16", + "name": "vmls_n_s16", "arguments": [ - "uint8x8_t r", - "uint16x8_t a" + "int16x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -55317,26 +257915,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u32", + "name": "vmls_n_s32", "arguments": [ - "uint16x4_t r", - "uint32x4_t a" + "int32x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -55346,26 +257948,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_high_u64", + "name": "vmls_n_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vn.2D" + "register": "Vd.4H" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -55375,22 +257981,30 @@ ], "instructions": [ [ - "XTN2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s16", + "name": "vmls_n_u32", "arguments": [ - "int16x8_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32_t c" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -55400,22 +258014,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s32", + "name": "vmls_s16", "arguments": [ - "int32x4_t a" + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55425,22 +258047,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_s64", + "name": "vmls_s32", "arguments": [ - "int64x2_t a" + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55450,22 +258080,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u16", + "name": "vmls_s8", "arguments": [ - "uint16x8_t a" + "int8x8_t a", + "int8x8_t b", + "int8x8_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -55475,22 
+258113,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u32", + "name": "vmls_u16", "arguments": [ - "uint32x4_t a" + "uint16x4_t a", + "uint16x4_t b", + "uint16x4_t c" ], "return_type": { "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -55500,22 +258146,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovn_u64", + "name": "vmls_u32", "arguments": [ - "uint64x2_t a" + "uint32x2_t a", + "uint32x2_t b", + "uint32x2_t c" ], "return_type": { "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55525,22 +258179,30 @@ ], "instructions": [ [ - "XTN" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_f16", + "name": "vmls_u8", "arguments": [ - "float16_t value" + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t c" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -55550,47 +258212,71 @@ ], "instructions": [ [ - "DUP" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_f32", + "name": "vmlsl_high_lane_s16", "arguments": [ - "float32_t value" + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vmovq_n_f64", + "name": "vmlsl_high_lane_s32", "arguments": [ - "float64_t value" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -55598,333 +258284,432 @@ ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_p16", + "name": "vmlsl_high_lane_u16", "arguments": [ - "poly16_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_p8", + "name": "vmlsl_high_lane_u32", "arguments": [ - "poly8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s16", + "name": "vmlsl_high_laneq_s16", "arguments": [ - "int16_t value" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": 
"rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s32", + "name": "vmlsl_high_laneq_s32", "arguments": [ - "int32_t value" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s64", + "name": "vmlsl_high_laneq_u16", "arguments": [ - "int64_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_s8", + "name": "vmlsl_high_laneq_u32", "arguments": [ - "int8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u16", + "name": 
"vmlsl_high_n_s16", "arguments": [ - "uint16_t value" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u32", + "name": "vmlsl_high_n_s32", "arguments": [ - "uint32_t value" + "int64x2_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u64", + "name": "vmlsl_high_n_u16", "arguments": [ - "uint64_t value" + "uint32x4_t a", + "uint16x8_t b", + "uint16_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmovq_n_u8", + "name": "vmlsl_high_n_u32", "arguments": [ - "uint8_t value" + "uint64x2_t a", + "uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "value": { - "register": "rn" + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "DUP" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f16", + "name": "vmlsl_high_s16", "arguments": [ - 
"float16x4_t a", - "float16x4_t b" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, "b": { - "register": "Vm.4H" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f32", + "name": "vmlsl_high_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { - "register": "Vm.2S" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_f64", + "name": "vmlsl_high_s8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16x8_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "float64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - "register": "Dm" + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -55932,98 +258717,92 @@ ], "instructions": [ [ - "FMUL" + "SMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f16", + "name": "vmlsl_high_u16", "arguments": [ - "float16x4_t a", - "float16x4_t v", - "const int lane" + "uint32x4_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.4H" + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + 
"UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f32", + "name": "vmlsl_high_u32", "arguments": [ - "float32x2_t a", - "float32x2_t v", - "const int lane" + "uint64x2_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FMUL" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_f64", + "name": "vmlsl_high_u8", "arguments": [ - "float64x1_t a", - "float64x1_t v", - "const int lane" + "uint16x8_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 0 + "b": { + "register": "Vn.16B" }, - "v": { - "register": "Vm.1D" + "c": { + "register": "Vm.16B" } }, "Architectures": [ @@ -56031,23 +258810,27 @@ ], "instructions": [ [ - "FMUL" + "UMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_s16", + "name": "vmlsl_lane_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", + "int16x4_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56065,23 +258848,27 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_s32", + "name": "vmlsl_lane_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", + "int32x2_t b", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56099,23 +258886,27 @@ ], 
"instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_u16", + "name": "vmlsl_lane_u16", "arguments": [ - "uint16x4_t a", + "uint32x4_t a", + "uint16x4_t b", "uint16x4_t v", "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56133,23 +258924,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_lane_u32", + "name": "vmlsl_lane_u32", "arguments": [ - "uint32x2_t a", + "uint64x2_t a", + "uint32x2_t b", "uint32x2_t v", "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56167,23 +258962,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f16", + "name": "vmlsl_laneq_s16", "arguments": [ - "float16x4_t a", - "float16x8_t v", + "int32x4_t a", + "int16x4_t b", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, "lane": { @@ -56199,23 +258998,27 @@ ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f32", + "name": "vmlsl_laneq_s32", "arguments": [ - "float32x2_t a", - "float32x4_t v", + "int64x2_t a", + "int32x2_t b", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56231,55 +259034,27 @@ ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_f64", + "name": "vmlsl_laneq_u16", "arguments": [ - "float64x1_t a", - "float64x2_t v", + "uint32x4_t a", + "uint16x4_t b", + "uint16x8_t v", "const int 
lane" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vd.4S" }, - "v": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FMUL" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vmul_laneq_s16", - "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "a": { + "b": { "register": "Vn.4H" }, "lane": { @@ -56295,23 +259070,27 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_s32", + "name": "vmlsl_laneq_u32", "arguments": [ - "int32x2_t a", - "int32x4_t v", + "uint64x2_t a", + "uint32x2_t b", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.2S" }, "lane": { @@ -56327,117 +259106,128 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_u16", + "name": "vmlsl_n_s16", "arguments": [ - "uint16x4_t a", - "uint16x8_t v", - "const int lane" + "int32x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vn.4H" }, - "v": { - "register": "Vm.8H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_laneq_u32", + "name": "vmlsl_n_s32", "arguments": [ - "uint32x2_t a", - "uint32x4_t v", - "const int lane" + "int64x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.2S" + "register": "Vd.2D" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.2S" }, - "v": { - "register": "Vm.4S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f16", + "name": "vmlsl_n_u16", "arguments": [ - "float16x4_t a", - "float16_t n" + "uint32x4_t a", + "uint16x4_t b", + "uint16_t c" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.4H" }, - "n": { + "c": { "register": "Vm.H[0]" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f32", + "name": "vmlsl_n_u32", "arguments": [ - "float32x2_t a", - "float32_t b" + "uint64x2_t a", + "uint32x2_t b", + "uint32_t c" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { + "register": "Vn.2S" + }, + "c": { "register": "Vm.S[0]" } }, @@ -56448,53 +259238,63 @@ ], "instructions": [ [ - "FMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_f64", + "name": "vmlsl_s16", "arguments": [ - "float64x1_t a", - "float64_t b" + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "float64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Vm.D[0]" + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_s16", + "name": "vmlsl_s32", "arguments": [ - "int16x4_t a", - "int16_t b" + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.4H" + "register": "Vd.2D" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -56504,26 +259304,30 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_s32", + "name": "vmlsl_s8", "arguments": [ - "int32x2_t a", - "int32_t b" + "int16x8_t a", + "int8x8_t b", + "int8x8_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.8B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -56533,26 +259337,30 @@ ], "instructions": [ [ - "MUL" + "SMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_u16", + "name": "vmlsl_u16", "arguments": [ - "uint16x4_t a", - "uint16_t b" + "uint32x4_t a", + "uint16x4_t b", + "uint16x4_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -56562,26 +259370,30 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_n_u32", + "name": "vmlsl_u32", "arguments": [ - "uint32x2_t a", - "uint32_t b" + "uint64x2_t a", + "uint32x2_t b", + "uint32x2_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -56591,25 +259403,29 @@ ], "instructions": [ [ - "MUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_p8", + "name": "vmlsl_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint16x8_t a", + "uint8x8_t b", + "uint8x8_t c" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": 
{ "a": { - "register": "Vn.8B" + "register": "Vd.8H" }, "b": { + "register": "Vn.8B" + }, + "c": { "register": "Vm.8B" } }, @@ -56620,26 +259436,30 @@ ], "instructions": [ [ - "PMUL" + "UMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s16", + "name": "vmlsq_f32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float32x4_t a", + "float32x4_t b", + "float32x4_t c" ], "return_type": { - "value": "int16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "N/A" }, "b": { - "register": "Vm.4H" + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ @@ -56649,55 +259469,57 @@ ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s32", + "name": "vmlsq_f64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float64x2_t a", + "float64x2_t b", + "float64x2_t c" ], "return_type": { - "value": "int32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "N/A" }, "b": { - "register": "Vm.2S" + "register": "N/A" + }, + "c": { + "register": "N/A" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_s8", + "name": "vmlsq_lane_f32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float32x4_t a", + "float32x4_t b", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 } }, "Architectures": [ @@ -56707,25 +259529,34 @@ ], "instructions": [ [ - "MUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u16", + "name": "vmlsq_lane_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vn.4H" + "register": "Vd.8H" }, "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { "register": "Vm.4H" } }, @@ -56736,25 +259567,34 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u32", + "name": "vmlsq_lane_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.4S" }, "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { "register": "Vm.2S" } }, @@ -56765,26 +259605,35 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmul_u8", + "name": "vmlsq_lane_u16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "uint16x8_t a", + "uint16x8_t b", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8H" }, "b": { - "register": "Vm.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -56794,63 +259643,64 @@ ], "instructions": [ [ - "MUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuld_lane_f64", + "name": "vmlsq_lane_u32", "arguments": [ - "float64_t a", - "float64x1_t v", + "uint32x4_t a", + "uint32x4_t b", + "uint32x2_t v", "const int lane" ], "return_type": { - "value": "float64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "v": { - "register": "Vm.1D" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuld_laneq_f64", + "name": 
"vmlsq_laneq_f32", "arguments": [ - "float64_t a", - "float64x2_t v", + "float32x4_t a", + "float32x4_t b", + "float32x4_t v", "const int lane" ], "return_type": { - "value": "float64_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" - }, "lane": { "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "maximum": 3 } }, "Architectures": [ @@ -56858,59 +259708,71 @@ ], "instructions": [ [ - "FMUL" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_f16", + "name": "vmlsq_laneq_s16", "arguments": [ - "float16_t a", - "float16_t b" + "int16x8_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "float16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" }, "b": { - "register": "Hm" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_lane_f16", + "name": "vmlsq_laneq_s32", "arguments": [ - "float16_t a", - "float16x4_t v", + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -56918,24 +259780,28 @@ ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulh_laneq_f16", + "name": "vmlsq_laneq_u16", "arguments": [ - "float16_t a", - "float16x8_t v", + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t v", "const int lane" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" }, "lane": { "minimum": 
0, @@ -56950,31 +259816,35 @@ ], "instructions": [ [ - "FMUL" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_s16", + "name": "vmlsq_laneq_u32", "arguments": [ - "int16x8_t a", - "int16x4_t v", + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -56982,358 +259852,393 @@ ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_s32", + "name": "vmlsq_n_f32", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b", + "float32_t c" ], "return_type": { - "value": "int64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "N/A" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "N/A" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "N/A" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "result" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_u16", + "name": "vmlsq_n_s16", "arguments": [ - "uint16x8_t a", - "uint16x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.4H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_lane_u32", + "name": "vmlsq_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x2_t v", - "const int lane" + "int32x4_t a", 
+ "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.2S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_s16", + "name": "vmlsq_n_u16", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b", + "uint16_t c" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.8H" + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_s32", + "name": "vmlsq_n_u32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.4S" }, - "v": { - "register": "Vm.4S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_u16", + "name": "vmlsq_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + 
"register": "Vn.8H" }, - "v": { + "c": { "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_laneq_u32", + "name": "vmlsq_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "b": { + "register": "Vn.4S" }, - "v": { + "c": { "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_s16", + "name": "vmlsq_s8", "arguments": [ - "int16x8_t a", - "int16_t b" + "int8x16_t a", + "int8x16_t b", + "int8x16_t c" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.16B" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_s32", + "name": "vmlsq_u16", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint16x8_t a", + "uint16x8_t b", + "uint16x8_t c" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.8H" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_u16", + "name": "vmlsq_u32", "arguments": [ - "uint16x8_t a", - "uint16_t b" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.8H" + "register": "Vd.4S" }, "b": { - "register": "Vm.H[0]" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_n_u32", + "name": "vmlsq_u8", "arguments": [ - "uint32x4_t a", - "uint32_t b" + "uint8x16_t a", + "uint8x16_t b", + "uint8x16_t c" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" }, "b": { - "register": "Vm.S[0]" + "register": "Vn.16B" + }, + "c": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "MLS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_p64", + "name": "vmmlaq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t r", + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "poly128_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ @@ -57342,19 +260247,20 @@ ], "instructions": [ [ - "PMULL2" + "SMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_p8", + "name": "vmmlaq_u32", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint32x4_t r", + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -57362,87 +260268,83 @@ }, "b": { "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "PMULL2" + "UMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s16", + "name": "vmov_n_f16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float16_t value" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - 
}, - "b": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s32", + "name": "vmov_n_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32_t value" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_s8", + "name": "vmov_n_f64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float64_t value" ], "return_type": { - "value": "int16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57450,112 +260352,147 @@ ], "instructions": [ [ - "SMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u16", + "name": "vmov_n_p16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly16_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u32", + "name": "vmov_n_p8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "poly8_t value" ], "return_type": { - "value": "uint64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_high_u8", + "name": "vmov_n_s16", "arguments": [ - 
"uint8x16_t a", - "uint8x16_t b" + "int16_t value" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMULL2" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_s16", + "name": "vmov_n_s32", "arguments": [ - "int16x4_t a", - "int16x4_t v", - "const int lane" + "int32_t value" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmov_n_s64", + "arguments": [ + "int64_t value" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmov_n_s8", + "arguments": [ + "int8_t value" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "value": { + "register": "rn" } }, "Architectures": [ @@ -57565,31 +260502,22 @@ ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_s32", + "name": "vmov_n_u16", "arguments": [ - "int32x2_t a", - "int32x2_t v", - "const int lane" + "uint16_t value" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57599,31 +260527,22 @@ ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vmull_lane_u16", + "name": "vmov_n_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t v", - "const int lane" + "uint32_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57633,31 +260552,22 @@ ], "instructions": [ [ - "UMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_lane_u32", + "name": "vmov_n_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t v", - "const int lane" + "uint64_t value" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -57667,63 +260577,47 @@ ], "instructions": [ [ - "UMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_s16", + "name": "vmov_n_u8", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "uint8_t value" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMULL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_s32", + "name": "vmovl_high_s16", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -57731,31 +260625,22 @@ ], "instructions": [ [ - "SMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vmull_laneq_u16", + "name": "vmovl_high_s32", "arguments": [ - "uint16x4_t a", - "uint16x8_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ @@ -57763,31 +260648,22 @@ ], "instructions": [ [ - "UMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_laneq_u32", + "name": "vmovl_high_s8", "arguments": [ - "uint32x2_t a", - "uint32x4_t v", - "const int lane" + "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ @@ -57795,113 +260671,91 @@ ], "instructions": [ [ - "UMULL" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_s16", + "name": "vmovl_high_u16", "arguments": [ - "int16x4_t a", - "int16_t b" + "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_s32", + "name": "vmovl_high_u32", "arguments": [ - "int32x2_t a", - "int32_t b" + "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_u16", + "name": "vmovl_high_u8", "arguments": [ - "uint16x4_t a", - "uint16_t b" + "uint8x16_t a" ], "return_type": { - "value": 
"uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UMULL" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_n_u32", + "name": "vmovl_s16", "arguments": [ - "uint32x2_t a", - "uint32_t b" + "int16x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.4H" } }, "Architectures": [ @@ -57911,54 +260765,47 @@ ], "instructions": [ [ - "UMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_p64", + "name": "vmovl_s32", "arguments": [ - "poly64_t a", - "poly64_t b" + "int32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.1D" - }, - "b": { - "register": "Vm.1D" + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "PMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_p8", + "name": "vmovl_s8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "int8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" } }, "Architectures": [ @@ -57968,26 +260815,22 @@ ], "instructions": [ [ - "PMULL" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s16", + "name": "vmovl_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ @@ -57997,26 +260840,22 @@ ], "instructions": [ [ - "SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s32", + "name": "vmovl_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t 
a" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58026,26 +260865,22 @@ ], "instructions": [ [ - "SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_s8", + "name": "vmovl_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" } }, "Architectures": [ @@ -58055,26 +260890,26 @@ ], "instructions": [ [ - "SMULL" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u16", + "name": "vmovn_high_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int8x8_t r", + "int16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, - "b": { - "register": "Vm.4H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -58084,26 +260919,26 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u32", + "name": "vmovn_high_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, - "b": { - "register": "Vm.2S" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -58113,26 +260948,26 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmull_u8", + "name": "vmovn_high_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int32x2_t r", + "int64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8B" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -58142,54 
+260977,55 @@ ], "instructions": [ [ - "UMULL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f16", + "name": "vmovn_high_u16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint8x8_t r", + "uint16x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "b": { - "register": "Vm.8H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f32", + "name": "vmovn_high_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint16x4_t r", + "uint32x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "b": { - "register": "Vm.4S" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -58199,91 +261035,76 @@ ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_f64", + "name": "vmovn_high_u64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint32x2_t r", + "uint64x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" }, - "b": { - "register": "Vm.2D" + "r": { + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f16", + "name": "vmovn_s16", "arguments": [ - "float16x8_t a", - "float16x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f32", + "name": "vmovn_s32", "arguments": [ - "float32x4_t a", - "float32x2_t v", - "const 
int lane" + "int32x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58293,63 +261114,47 @@ ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_f64", + "name": "vmovn_s64", "arguments": [ - "float64x2_t a", - "float64x1_t v", - "const int lane" + "int64x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "lane": { - "minimum": 0, - "maximum": 0 - }, - "v": { - "register": "Vm.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_s16", + "name": "vmovn_u16", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ @@ -58359,31 +261164,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_s32", + "name": "vmovn_u32", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ @@ -58393,31 +261189,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_u16", + "name": "vmovn_u64", "arguments": [ - "uint16x8_t a", - "uint16x4_t v", - "const int lane" + "uint64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vn.2D" } }, "Architectures": [ @@ -58427,31 +261214,22 @@ ], "instructions": [ [ - "MUL" + "XTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_lane_u32", + "name": "vmovq_n_f16", "arguments": [ - "uint32x4_t a", - "uint32x2_t v", - "const int lane" + "float16_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58461,63 +261239,47 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f16", + "name": "vmovq_n_f32", "arguments": [ - "float16x8_t a", - "float16x8_t v", - "const int lane" + "float32_t value" ], "return_type": { - "value": "float16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f32", + "name": "vmovq_n_f64", "arguments": [ - "float32x4_t a", - "float32x4_t v", - "const int lane" + "float64_t value" ], "return_type": { - "value": "float32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58525,214 +261287,172 @@ ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_f64", + "name": "vmovq_n_p16", "arguments": [ - "float64x2_t a", - "float64x2_t v", - "const int lane" + "poly16_t value" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_s16", + "name": "vmovq_n_p8", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "poly8_t value" ], "return_type": { - "value": "int16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_s32", + "name": "vmovq_n_s16", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int16_t value" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_u16", + "name": "vmovq_n_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t v", - "const int lane" + "int32_t value" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_laneq_u32", + "name": "vmovq_n_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t v", - "const int lane" + "int64_t value" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f16", + "name": "vmovq_n_s8", "arguments": [ - "float16x8_t a", - "float16_t n" + "int8_t value" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "n": { - "register": "Vm.H[0]" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f32", + "name": "vmovq_n_u16", "arguments": [ - "float32x4_t a", - "float32_t b" + "uint16_t value" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58742,53 +261462,47 @@ ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_f64", + "name": "vmovq_n_u32", "arguments": [ - "float64x2_t a", - "float64_t b" + "uint32_t value" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.D[0]" + "value": { + "register": "rn" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_s16", + "name": "vmovq_n_u64", "arguments": [ - "int16x8_t a", - "int16_t b" + "uint64_t value" ], "return_type": { - "value": "int16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.H[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58798,26 +261512,22 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vmulq_n_s32", + "name": "vmovq_n_u8", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint8_t value" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "value": { + "register": "rn" } }, "Architectures": [ @@ -58827,55 +261537,54 @@ ], "instructions": [ [ - "MUL" + "DUP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_u16", + "name": "vmul_f16", "arguments": [ - "uint16x8_t a", - "uint16_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_n_u32", + "name": "vmul_f32", "arguments": [ - "uint32x4_t a", - "uint32_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.S[0]" + "register": "Vm.2S" } }, "Architectures": [ @@ -58885,84 +261594,91 @@ ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_p8", + "name": "vmul_f64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "PMUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s16", + "name": "vmul_lane_f16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float16x4_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": 
"int16x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s32", + "name": "vmul_lane_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32x2_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -58972,55 +261688,63 @@ ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_s8", + "name": "vmul_lane_f64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float64x1_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MUL" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulq_u16", + "name": "vmul_lane_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59036,20 +261760,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulq_u32", + "name": "vmul_lane_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t 
b" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -59065,20 +261794,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulq_u8", + "name": "vmul_lane_u16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint16x4_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59094,18 +261828,18 @@ }, { "SIMD_ISA": "Neon", - "name": "vmuls_lane_f32", + "name": "vmul_lane_u32", "arguments": [ - "float32_t a", - "float32x2_t v", + "uint32x2_t a", + "uint32x2_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.2S" }, "lane": { "minimum": 0, @@ -59116,35 +261850,37 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMUL" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmuls_laneq_f32", + "name": "vmul_laneq_f16", "arguments": [ - "float32_t a", - "float32x4_t v", + "float16x4_t a", + "float16x8_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4H" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -59158,20 +261894,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vmulx_f16", + "name": "vmul_laneq_f32", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "float32x2_t a", + "float32x4_t v", + 
"const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -59179,26 +261920,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_f32", + "name": "vmul_laneq_f64", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "float64x1_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ @@ -59206,26 +261952,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_f64", + "name": "vmul_laneq_s16", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -59233,31 +261984,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_lane_f16", + "name": "vmul_laneq_s32", "arguments": [ - "float16x4_t a", - "float16x4_t v", + "int32x2_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -59265,31 +262016,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vmulx_lane_f32", + "name": "vmul_laneq_u16", "arguments": [ - "float32x2_t a", - "float32x2_t v", + "uint16x4_t a", + "uint16x8_t v", "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "v": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -59297,31 +262048,31 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_lane_f64", + "name": "vmul_laneq_u32", "arguments": [ - "float64x1_t a", - "float64x1_t v", + "uint32x2_t a", + "uint32x4_t v", "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 3 }, "v": { - "register": "Vm.1D" + "register": "Vm.4S" } }, "Architectures": [ @@ -59329,17 +262080,16 @@ ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f16", + "name": "vmul_n_f16", "arguments": [ "float16x4_t a", - "float16x8_t v", - "const int lane" + "float16_t n" ], "return_type": { "value": "float16x4_t" @@ -59348,30 +262098,26 @@ "a": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f32", + "name": "vmul_n_f32", "arguments": [ "float32x2_t a", - "float32x4_t v", - "const int lane" + "float32_t b" ], "return_type": { "value": "float32x2_t" @@ -59380,30 +262126,27 @@ "a": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "FMUL" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vmulx_laneq_f64", + "name": "vmul_n_f64", "arguments": [ "float64x1_t a", - "float64x2_t v", - "const int lane" + "float64_t b" ], "return_type": { "value": "float64x1_t" @@ -59412,12 +262155,8 @@ "a": { "register": "Dn" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2D" + "b": { + "register": "Vm.D[0]" } }, "Architectures": [ @@ -59425,321 +262164,350 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulx_n_f16", + "name": "vmul_n_s16", "arguments": [ - "float16x4_t a", - "float16_t n" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" }, - "n": { + "b": { "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_f64", + "name": "vmul_n_s32", "arguments": [ - "float64_t a", - "float64_t b" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "float64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, "b": { - "register": "Dm" + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_lane_f64", + "name": "vmul_n_u16", "arguments": [ - "float64_t a", - "float64x1_t v", - "const int lane" + "uint16x4_t a", + "uint16_t b" ], "return_type": { - "value": "float64_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Vn.4H" }, - "v": { - "register": "Vm.1D" + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxd_laneq_f64", + "name": "vmul_n_u32", "arguments": [ - "float64_t a", - "float64x2_t v", - "const 
int lane" + "uint32x2_t a", + "uint32_t b" ], "return_type": { - "value": "float64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.2S" }, - "v": { - "register": "Vm.2D" + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_f16", + "name": "vmul_p8", "arguments": [ - "float16_t a", - "float16_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "float16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8B" }, "b": { - "register": "Hm" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "PMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_lane_f16", + "name": "vmul_s16", "arguments": [ - "float16_t a", - "float16x4_t v", - "const int lane" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4H" }, - "v": { + "b": { "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxh_laneq_f16", + "name": "vmul_s32", "arguments": [ - "float16_t a", - "float16x8_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmul_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + 
"Arguments_Preparation": { + "a": { + "register": "Vn.8B" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f16", + "name": "vmul_u16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f32", + "name": "vmul_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_f64", + "name": "vmul_u8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" }, "b": { - "register": "Vm.2D" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMULX" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f16", + "name": "vmuld_lane_f64", "arguments": [ - "float16x8_t a", - "float16x4_t v", + "float64_t a", + "float64x1_t v", "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Dn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 0 }, "v": { - "register": "Vm.4H" + 
"register": "Vm.1D" } }, "Architectures": [ @@ -59747,31 +262515,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f32", + "name": "vmuld_laneq_f64", "arguments": [ - "float32x4_t a", - "float32x2_t v", + "float64_t a", + "float64x2_t v", "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" }, "lane": { "minimum": 0, "maximum": 1 }, "v": { - "register": "Vm.2S" + "register": "Vm.2D" } }, "Architectures": [ @@ -59779,31 +262547,59 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_lane_f64", + "name": "vmulh_f16", "arguments": [ - "float64x2_t a", - "float64x1_t v", + "float16_t a", + "float16_t b" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A32", + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vmulh_lane_f16", + "arguments": [ + "float16_t a", + "float16x4_t v", "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 0 + "maximum": 3 }, "v": { - "register": "Vm.1D" + "register": "Vm.4H" } }, "Architectures": [ @@ -59811,24 +262607,24 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f16", + "name": "vmulh_laneq_f16", "arguments": [ - "float16x8_t a", + "float16_t a", "float16x8_t v", "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Hn" }, "lane": { "minimum": 0, @@ -59843,31 +262639,31 @@ ], "instructions": [ [ - "FMULX" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f32", + 
"name": "vmull_high_lane_s16", "arguments": [ - "float32x4_t a", - "float32x4_t v", + "int16x8_t a", + "int16x4_t v", "const int lane" ], "return_type": { - "value": "float32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "lane": { "minimum": 0, "maximum": 3 }, "v": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ @@ -59875,31 +262671,31 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_laneq_f64", + "name": "vmull_high_lane_s32", "arguments": [ - "float64x2_t a", - "float64x2_t v", + "int32x4_t a", + "int32x2_t v", "const int lane" ], "return_type": { - "value": "float64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "lane": { "minimum": 0, "maximum": 1 }, "v": { - "register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ @@ -59907,26 +262703,31 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxq_n_f16", + "name": "vmull_high_lane_u16", "arguments": [ - "float16x8_t a", - "float16_t n" + "uint16x8_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "float16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "n": { - "register": "Vm.H[0]" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -59934,26 +262735,31 @@ ], "instructions": [ [ - "FMULX" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_f32", + "name": "vmull_high_lane_u32", "arguments": [ - "float32_t a", - "float32_t b" + "uint32x4_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "float32_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, - "b": { - "register": "Sm" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, 
"Architectures": [ @@ -59961,31 +262767,31 @@ ], "instructions": [ [ - "FMULX" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_lane_f32", + "name": "vmull_high_laneq_s16", "arguments": [ - "float32_t a", - "float32x2_t v", + "int16x8_t a", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.8H" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "v": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -59993,24 +262799,24 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmulxs_laneq_f32", + "name": "vmull_high_laneq_s32", "arguments": [ - "float32_t a", - "float32x4_t v", + "int32x4_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "float32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -60025,396 +262831,454 @@ ], "instructions": [ [ - "FMULX" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_p8", + "name": "vmull_high_laneq_u16", "arguments": [ - "poly8x8_t a" + "uint16x8_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s16", + "name": "vmull_high_laneq_u32", "arguments": [ - "int16x4_t a" + "uint32x4_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", 
- "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s32", + "name": "vmull_high_n_s16", "arguments": [ - "int32x2_t a" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_s8", + "name": "vmull_high_n_s32", "arguments": [ - "int8x8_t a" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_u16", + "name": "vmull_high_n_u16", "arguments": [ - "uint16x4_t a" + "uint16x8_t a", + "uint16_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_u32", + "name": "vmull_high_n_u32", "arguments": [ - "uint32x2_t a" + "uint32x4_t a", + "uint32_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvn_u8", + "name": "vmull_high_p64", "arguments": [ - "uint8x8_t a" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MVN" + "PMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_p8", + "name": "vmull_high_p8", "arguments": [ - "poly8x16_t a" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "PMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s16", + "name": "vmull_high_s16", "arguments": [ - "int16x8_t a" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s32", + "name": "vmull_high_s32", "arguments": [ - "int32x4_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_s8", + "name": "vmull_high_s8", "arguments": [ - "int8x16_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "SMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u16", + "name": "vmull_high_u16", "arguments": [ - "uint16x8_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u32", + "name": "vmull_high_u32", "arguments": [ - "uint32x4_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vmvnq_u8", + "name": "vmull_high_u8", "arguments": [ - "uint8x16_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MVN" + "UMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_f16", + "name": "vmull_lane_s16", "arguments": [ - "float16x4_t a" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_f32", + "name": "vmull_lane_s32", "arguments": [ - "float32x2_t a" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -60424,45 +263288,65 @@ ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vneg_f64", + "name": "vmull_lane_u16", "arguments": [ - "float64x1_t a" + "uint16x4_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s16", + "name": "vmull_lane_u32", "arguments": [ - "int16x4_t a" + "uint32x2_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -60472,47 +263356,63 @@ ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s32", + "name": "vmull_laneq_s16", "arguments": [ - "int32x2_t a" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s64", + "name": "vmull_laneq_s32", "arguments": [ - "int64x1_t a" + "int32x2_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -60520,47 +263420,63 @@ ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vneg_s8", + "name": "vmull_laneq_u16", "arguments": [ - 
"int8x8_t a" + "uint16x4_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegd_s64", + "name": "vmull_laneq_u32", "arguments": [ - "int64_t a" + "uint32x2_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -60568,70 +263484,84 @@ ], "instructions": [ [ - "NEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegh_f16", + "name": "vmull_n_s16", "arguments": [ - "float16_t a" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "float16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f16", + "name": "vmull_n_s32", "arguments": [ - "float16x8_t a" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FNEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f32", + "name": "vmull_n_u16", "arguments": [ - "float32x4_t a" + "uint16x4_t a", + "uint16_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" + }, + "b": 
{ + "register": "Vm.H[0]" } }, "Architectures": [ @@ -60641,70 +263571,83 @@ ], "instructions": [ [ - "FNEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_f64", + "name": "vmull_n_u32", "arguments": [ - "float64x2_t a" + "uint32x2_t a", + "uint32_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FNEG" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s16", + "name": "vmull_p64", "arguments": [ - "int16x8_t a" + "poly64_t a", + "poly64_t b" ], "return_type": { - "value": "int16x8_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.1D" + }, + "b": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "NEG" + "PMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s32", + "name": "vmull_p8", "arguments": [ - "int32x4_t a" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ @@ -60714,45 +263657,55 @@ ], "instructions": [ [ - "NEG" + "PMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s64", + "name": "vmull_s16", "arguments": [ - "int64x2_t a" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vnegq_s8", + "name": "vmull_s32", "arguments": [ - "int8x16_t a" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int64x2_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ @@ -60762,19 +263715,19 @@ ], "instructions": [ [ - "NEG" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s16", + "name": "vmull_s8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -60791,26 +263744,26 @@ ], "instructions": [ [ - "ORN" + "SMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s32", + "name": "vmull_u16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, "b": { - "register": "Vm.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -60820,26 +263773,26 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s64", + "name": "vmull_u32", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" }, "b": { - "register": "Vm.8B" + "register": "Vm.2S" } }, "Architectures": [ @@ -60849,19 +263802,19 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_s8", + "name": "vmull_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -60878,55 +263831,54 @@ ], "instructions": [ [ - "ORN" + "UMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u16", + "name": "vmulq_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u32", + "name": "vmulq_f32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.8B" + "register": "Vm.4S" } }, "Architectures": [ @@ -60936,84 +263888,91 @@ ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u64", + "name": "vmulq_f64", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorn_u8", + "name": "vmulq_lane_f16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float16x8_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s16", + "name": "vmulq_lane_f32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float32x4_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": 
{ - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61023,55 +263982,63 @@ ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s32", + "name": "vmulq_lane_f64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float64x2_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s64", + "name": "vmulq_lane_s16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int16x8_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -61081,26 +264048,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_s8", + "name": "vmulq_lane_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x4_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61110,26 +264082,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u16", + "name": "vmulq_lane_u16", "arguments": [ "uint16x8_t a", - "uint16x8_t b" + "uint16x4_t v", + "const int lane" ], "return_type": { "value": 
"uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -61139,26 +264116,31 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u32", + "name": "vmulq_lane_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b" + "uint32x2_t v", + "const int lane" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -61168,258 +264150,278 @@ ], "instructions": [ [ - "ORN" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u64", + "name": "vmulq_laneq_f16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "float16x8_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vornq_u8", + "name": "vmulq_laneq_f32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "float32x4_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORN" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s16", + "name": "vmulq_laneq_f64", "arguments": [ - "int16x4_t a", - "int16x4_t 
b" + "float64x2_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s32", + "name": "vmulq_laneq_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int16x8_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s64", + "name": "vmulq_laneq_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int32x4_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_s8", + "name": "vmulq_laneq_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16x8_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vorr_u16", + "name": "vmulq_laneq_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32x4_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u32", + "name": "vmulq_n_f16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "float16x8_t a", + "float16_t n" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vm.8B" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u64", + "name": "vmulq_n_f32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "float32x4_t a", + "float32_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.8B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61429,55 +264431,53 @@ ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorr_u8", + "name": "vmulq_n_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float64x2_t a", + "float64_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" + "register": "Vm.D[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ORR" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s16", + "name": "vmulq_n_s16", "arguments": [ "int16x8_t 
a", - "int16x8_t b" + "int16_t b" ], "return_type": { "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.H[0]" } }, "Architectures": [ @@ -61487,26 +264487,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s32", + "name": "vmulq_n_s32", "arguments": [ "int32x4_t a", - "int32x4_t b" + "int32_t b" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61516,26 +264516,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s64", + "name": "vmulq_n_u16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint16x8_t a", + "uint16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.H[0]" } }, "Architectures": [ @@ -61545,26 +264545,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_s8", + "name": "vmulq_n_u32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint32x4_t a", + "uint32_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.S[0]" } }, "Architectures": [ @@ -61574,19 +264574,19 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u16", + "name": "vmulq_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -61603,26 +264603,26 @@ ], "instructions": [ [ - "ORR" + "PMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vorrq_u32", + "name": "vmulq_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.8H" } }, "Architectures": [ @@ -61632,26 +264632,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u64", + "name": "vmulq_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -61661,19 +264661,19 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vorrq_u8", + "name": "vmulq_s8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { @@ -61690,26 +264690,26 @@ ], "instructions": [ [ - "ORR" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s16", + "name": "vmulq_u16", "arguments": [ - "int32x2_t a", - "int16x4_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -61719,26 +264719,26 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s32", + "name": "vmulq_u32", "arguments": [ - "int64x1_t a", - "int32x2_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" + "register": 
"Vm.4S" } }, "Architectures": [ @@ -61748,26 +264748,26 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_s8", + "name": "vmulq_u8", "arguments": [ - "int16x4_t a", - "int8x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.16B" }, "b": { - "register": "Vn.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -61777,277 +264777,258 @@ ], "instructions": [ [ - "SADALP" + "MUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_u16", + "name": "vmuls_lane_f32", "arguments": [ - "uint32x2_t a", - "uint16x4_t b" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Sn" }, - "b": { - "register": "Vn.4H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "UADALP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vpadal_u32", - "arguments": [ - "uint64x1_t a", - "uint32x2_t b" - ], - "return_type": { - "value": "uint64x1_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1D" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vn.2S" + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMUL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadal_u8", + "name": "vmuls_laneq_f32", "arguments": [ - "uint16x4_t a", - "uint8x8_t b" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Sn" }, - "b": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMUL" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vpadalq_s16", + "name": "vmulx_f16", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4H" }, "b": { - "register": "Vn.8H" + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_s32", + "name": "vmulx_f32", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2S" }, "b": { - "register": "Vn.4S" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_s8", + "name": "vmulx_f64", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.16B" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u16", + "name": "vmulx_lane_f16", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "float16x4_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4H" }, - "b": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u32", + "name": "vmulx_lane_f32", "arguments": [ - "uint64x2_t a", 
- "uint32x4_t b" + "float32x2_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2S" }, - "b": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadalq_u8", + "name": "vmulx_lane_f64", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "float64x1_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADALP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_f16", + "name": "vmulx_laneq_f16", "arguments": [ "float16x4_t a", - "float16x4_t b" + "float16x8_t v", + "const int lane" ], "return_type": { "value": "float16x4_t" @@ -62056,26 +265037,30 @@ "a": { "register": "Vn.4H" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_f32", + "name": "vmulx_laneq_f32", "arguments": [ "float32x2_t a", - "float32x2_t b" + "float32x4_t v", + "const int lane" ], "return_type": { "value": "float32x2_t" @@ -62084,207 +265069,221 @@ "a": { "register": "Vn.2S" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s16", + "name": "vmulx_laneq_f64", "arguments": [ - 
"int16x4_t a", - "int16x4_t b" + "float64x1_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s32", + "name": "vmulx_n_f16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float16x4_t a", + "float16_t n" ], "return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4H" }, - "b": { - "register": "Vm.2S" + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_s8", + "name": "vmulxd_f64", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "int8x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Dn" }, "b": { - "register": "Vm.8B" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u16", + "name": "vmulxd_lane_f64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "float64_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, - "b": { - "register": "Vm.4H" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u32", + "name": "vmulxd_laneq_f64", "arguments": [ - "uint32x2_t a", - "uint32x2_t 
b" + "float64_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, - "b": { - "register": "Vm.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadd_u8", + "name": "vmulxh_f16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Hn" }, "b": { - "register": "Vm.8B" + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_f64", + "name": "vmulxh_lane_f16", "arguments": [ - "float64x2_t a" + "float16_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "float64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -62292,22 +265291,31 @@ ], "instructions": [ [ - "FADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_s64", + "name": "vmulxh_laneq_f16", "arguments": [ - "int64x2_t a" + "float16_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -62315,22 +265323,26 @@ ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddd_u64", + "name": "vmulxq_f16", "arguments": [ - "uint64x2_t a" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - 
"value": "uint64_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -62338,674 +265350,711 @@ ], "instructions": [ [ - "ADDP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s16", + "name": "vmulxq_f32", "arguments": [ - "int16x4_t a" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s32", + "name": "vmulxq_f64", "arguments": [ - "int32x2_t a" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_s8", + "name": "vmulxq_lane_f16", "arguments": [ - "int8x8_t a" + "float16x8_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u16", + "name": "vmulxq_lane_f32", "arguments": [ - "uint16x4_t a" + "float32x4_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } 
}, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u32", + "name": "vmulxq_lane_f64", "arguments": [ - "uint32x2_t a" + "float64x2_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" + }, + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vm.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddl_u8", + "name": "vmulxq_laneq_f16", "arguments": [ - "uint8x8_t a" + "float16x8_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s16", + "name": "vmulxq_laneq_f32", "arguments": [ - "int16x8_t a" + "float32x4_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s32", + "name": "vmulxq_laneq_f64", "arguments": [ - "int32x4_t a" + "float64x2_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2D" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_s8", + "name": "vmulxq_n_f16", "arguments": [ - "int8x16_t a" + "float16x8_t a", + "float16_t n" ], "return_type": { - "value": "int16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" + }, + "n": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u16", + "name": "vmulxs_f32", "arguments": [ - "uint16x8_t a" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u32", + "name": "vmulxs_lane_f32", "arguments": [ - "uint32x4_t a" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddlq_u8", + "name": "vmulxs_laneq_f32", "arguments": [ - "uint8x16_t a" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UADDLP" + "FMULX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f16", + "name": "vmvn_p8", "arguments": [ - 
"float16x8_t a", - "float16x8_t b" + "poly8x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f32", + "name": "vmvn_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_f64", + "name": "vmvn_s32", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s16", + "name": "vmvn_s8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s32", + "name": "vmvn_u16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint16x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vpaddq_s64", + "name": "vmvn_u32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint32x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_s8", + "name": "vmvn_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u16", + "name": "vmvnq_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u32", + "name": "vmvnq_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u64", + "name": "vmvnq_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", 
"A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpaddq_u8", + "name": "vmvnq_s8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpadds_f32", + "name": "vmvnq_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t a" ], "return_type": { - "value": "float32_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FADDP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_f16", + "name": "vmvnq_u32", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "uint32x4_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMAXP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_f32", + "name": "vmvnq_u8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint8x16_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.16B" } }, "Architectures": [ @@ -63015,55 +266064,46 @@ ], "instructions": [ [ - "FMAXP" + "MVN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s16", + "name": "vneg_f16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], 
"instructions": [ [ - "SMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s32", + "name": "vneg_f32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ @@ -63073,55 +266113,45 @@ ], "instructions": [ [ - "SMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_s8", + "name": "vneg_f64", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "float64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SMAXP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u16", + "name": "vneg_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ @@ -63131,26 +266161,22 @@ ], "instructions": [ [ - "UMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u32", + "name": "vneg_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ @@ -63160,82 +266186,70 @@ ], "instructions": [ [ - "UMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmax_u8", + "name": "vneg_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "UMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnm_f16", + "name": "vneg_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x8_t a" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnm_f32", + "name": "vnegd_s64", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int64_t a" ], "return_type": { - "value": "float32x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Dn" } }, "Architectures": [ @@ -63243,99 +266257,91 @@ ], "instructions": [ [ - "FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f16", + "name": "vnegh_f16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "float16_t a" ], "return_type": { - "value": "float16x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f32", + "name": "vnegq_f16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "float16x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmq_f64", + "name": "vnegq_f32", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "float32x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - 
"register": "Vm.2D" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnmqd_f64", + "name": "vnegq_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "float64_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -63347,76 +266353,72 @@ ], "instructions": [ [ - "FMAXNMP" + "FNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxnms_f32", + "name": "vnegq_s16", "arguments": [ - "float32x2_t a" + "int16x8_t a" ], "return_type": { - "value": "float32_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXNMP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f16", + "name": "vnegq_s32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int32x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f32", + "name": "vnegq_s64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.2D" } }, "Architectures": [ @@ -63424,289 +266426,312 @@ ], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_f64", + "name": "vnegq_s8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int8x16_t a" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" 
], "instructions": [ [ - "FMAXP" + "NEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s16", + "name": "vorn_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s32", + "name": "vorn_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_s8", + "name": "vorn_s64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u16", + "name": "vorn_s8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u32", + "name": "vorn_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": 
{ - "value": "uint32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxq_u8", + "name": "vorn_u32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxqd_f64", + "name": "vorn_u64", "arguments": [ - "float64x2_t a" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "float64_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmaxs_f32", + "name": "vorn_u8", "arguments": [ - "float32x2_t a" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMAXP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_f16", + "name": "vornq_s16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FMINP" + 
"ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_f32", + "name": "vornq_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63716,26 +266741,26 @@ ], "instructions": [ [ - "FMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s16", + "name": "vornq_s64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -63745,26 +266770,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s32", + "name": "vornq_s8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63774,26 +266799,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_s8", + "name": "vornq_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -63803,26 +266828,26 @@ ], "instructions": [ [ - "SMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u16", + "name": "vornq_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - 
"value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -63832,26 +266857,26 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u32", + "name": "vornq_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -63861,26 +266886,26 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmin_u8", + "name": "vornq_u8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -63890,335 +266915,367 @@ ], "instructions": [ [ - "UMINP" + "ORN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnm_f16", + "name": "vorr_s16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8B" }, "b": { - "register": "Vm.4H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnm_f32", + "name": "vorr_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" }, "b": { - "register": "Vm.2S" + "register": 
"Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f16", + "name": "vorr_s64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f32", + "name": "vorr_s8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmq_f64", + "name": "vorr_u16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" }, "b": { - "register": "Vm.2D" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnmqd_f64", + "name": "vorr_u32", "arguments": [ - "float64x2_t a" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float64_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminnms_f32", + "name": "vorr_u64", "arguments": [ - "float32x2_t a" + 
"uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "float32_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINNMP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f16", + "name": "vorr_u8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f32", + "name": "vorrq_s16", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_f64", + "name": "vorrq_s32", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s16", + "name": "vorrq_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": 
"Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s32", + "name": "vorrq_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_s8", + "name": "vorrq_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -64229,71 +267286,77 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u16", + "name": "vorrq_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u32", + "name": "vorrq_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminq_u8", + "name": "vorrq_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -64310,72 +267373,90 @@ } 
}, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UMINP" + "ORR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpminqd_f64", + "name": "vpadal_s16", "arguments": [ - "float64x2_t a" + "int32x2_t a", + "int16x4_t b" ], "return_type": { - "value": "float64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vpmins_f32", + "name": "vpadal_s32", "arguments": [ - "float32x2_t a" + "int64x1_t a", + "int32x2_t b" ], "return_type": { - "value": "float32_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.1D" + }, + "b": { "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FMINP" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s16", + "name": "vpadal_s8", "arguments": [ - "int16x4_t a" + "int16x4_t a", + "int8x8_t b" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4H" + }, + "b": { + "register": "Vn.8B" } }, "Architectures": [ @@ -64385,22 +267466,26 @@ ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s32", + "name": "vpadal_u16", "arguments": [ - "int32x2_t a" + "uint32x2_t a", + "uint16x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2S" + }, + "b": { + "register": "Vn.4H" } }, "Architectures": [ @@ -64410,44 +267495,54 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s64", + "name": "vpadal_u32", "arguments": [ - "int64x1_t a" + "uint64x1_t a", + "uint32x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.1D" + 
}, + "b": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabs_s8", + "name": "vpadal_u8", "arguments": [ - "int8x8_t a" + "uint16x4_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4H" + }, + "b": { "register": "Vn.8B" } }, @@ -64458,90 +267553,112 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsb_s8", + "name": "vpadalq_s16", "arguments": [ - "int8_t a" + "int32x4_t a", + "int16x8_t b" ], "return_type": { - "value": "int8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsd_s64", + "name": "vpadalq_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsh_s16", + "name": "vpadalq_s8", "arguments": [ - "int16_t a" + "int16x8_t a", + "int8x16_t b" ], "return_type": { - "value": "int16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "SADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s16", + "name": "vpadalq_u16", "arguments": [ - "int16x8_t a" + "uint32x4_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + 
"b": { "register": "Vn.8H" } }, @@ -64552,21 +267669,25 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s32", + "name": "vpadalq_u32", "arguments": [ - "int32x4_t a" + "uint64x2_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.4S" } }, @@ -64577,84 +267698,99 @@ ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s64", + "name": "vpadalq_u8", "arguments": [ - "int64x2_t a" + "uint16x8_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.8H" + }, + "b": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "UADALP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabsq_s8", + "name": "vpadd_f16", "arguments": [ - "int8x16_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SQABS" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqabss_s32", + "name": "vpadd_f32", "arguments": [ - "int32_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQABS" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s16", + "name": "vpadd_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -64677,13 +267813,13 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s32", + "name": "vpadd_s32", 
"arguments": [ "int32x2_t a", "int32x2_t b" @@ -64706,26 +267842,26 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s64", + "name": "vpadd_s8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8B" }, "b": { - "register": "Dm" + "register": "Vm.8B" } }, "Architectures": [ @@ -64735,26 +267871,26 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_s8", + "name": "vpadd_u16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, "b": { - "register": "Vm.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -64764,26 +267900,26 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u16", + "name": "vpadd_u32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "b": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -64793,26 +267929,26 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u32", + "name": "vpadd_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8B" }, "b": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -64822,84 +267958,68 @@ ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u64", + "name": "vpaddd_f64", "arguments": 
[ - "uint64x1_t a", - "uint64x1_t b" + "float64x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadd_u8", + "name": "vpaddd_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int64x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddb_s8", + "name": "vpaddd_u64", "arguments": [ - "int8_t a", - "int8_t b" + "uint64x2_t a" ], "return_type": { - "value": "int8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" - }, - "b": { - "register": "Bm" + "register": "Vn.2D" } }, "Architectures": [ @@ -64907,161 +268027,147 @@ ], "instructions": [ [ - "SQADD" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddb_u8", + "name": "vpaddl_s16", "arguments": [ - "uint8_t a", - "uint8_t b" + "int16x4_t a" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" - }, - "b": { - "register": "Bm" + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddd_s64", + "name": "vpaddl_s32", "arguments": [ - "int64_t a", - "int64_t b" + "int32x2_t a" ], "return_type": { - "value": "int64_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddd_u64", + 
"name": "vpaddl_s8", "arguments": [ - "uint64_t a", - "uint64_t b" + "int8x8_t a" ], "return_type": { - "value": "uint64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddh_s16", + "name": "vpaddl_u16", "arguments": [ - "int16_t a", - "int16_t b" + "uint16x4_t a" ], "return_type": { - "value": "int16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddh_u16", + "name": "vpaddl_u32", "arguments": [ - "uint16_t a", - "uint16_t b" + "uint32x2_t a" ], "return_type": { - "value": "uint16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s16", + "name": "vpaddl_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint8x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.8B" } }, "Architectures": [ @@ -65071,26 +268177,22 @@ ], "instructions": [ [ - "SQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s32", + "name": "vpaddlq_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16x8_t a" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -65100,26 +268202,22 @@ ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vqaddq_s64", + "name": "vpaddlq_s32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int32x4_t a" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.4S" } }, "Architectures": [ @@ -65129,26 +268227,22 @@ ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_s8", + "name": "vpaddlq_s8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" } }, "Architectures": [ @@ -65158,26 +268252,22 @@ ], "instructions": [ [ - "SQADD" + "SADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u16", + "name": "vpaddlq_u16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" } }, "Architectures": [ @@ -65187,26 +268277,22 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u32", + "name": "vpaddlq_u32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" } }, "Architectures": [ @@ -65216,26 +268302,22 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u64", + "name": "vpaddlq_u8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.16B" } }, "Architectures": [ @@ -65245,55 +268327,53 @@ ], "instructions": [ [ - "UQADD" + "UADDLP" ] 
] }, { "SIMD_ISA": "Neon", - "name": "vqaddq_u8", + "name": "vpaddq_f16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadds_s32", + "name": "vpaddq_f32", "arguments": [ - "int32_t a", - "int32_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.4S" }, "b": { - "register": "Sm" + "register": "Vm.4S" } }, "Architectures": [ @@ -65301,26 +268381,26 @@ ], "instructions": [ [ - "SQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqadds_u32", + "name": "vpaddq_f64", "arguments": [ - "uint32_t a", - "uint32_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint32_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.2D" }, "b": { - "register": "Sm" + "register": "Vm.2D" } }, "Architectures": [ @@ -65328,35 +268408,53 @@ ], "instructions": [ [ - "UQADD" + "FADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_lane_s16", + "name": "vpaddq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vpaddq_s32", "arguments": [ "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "int32x4_t b" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" 
}, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -65364,35 +268462,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_lane_s32", + "name": "vpaddq_s64", "arguments": [ "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "int64x2_t b" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.2D" } }, "Architectures": [ @@ -65400,35 +268489,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_laneq_s16", + "name": "vpaddq_s8", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ @@ -65436,35 +268516,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_laneq_s32", + "name": "vpaddq_u16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -65472,30 +268543,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqdmlal_high_n_s16", + "name": "vpaddq_u32", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16_t c" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ @@ -65503,30 +268570,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_n_s32", + "name": "vpaddq_u64", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.2D" } }, "Architectures": [ @@ -65534,30 +268597,26 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_s16", + "name": "vpaddq_u8", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ @@ -65565,30 +268624,22 @@ ], "instructions": [ [ - "SQDMLAL2" + "ADDP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_high_s32", + "name": "vpadds_f32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "float32x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.4S" + "register": "Vn.2S" } }, "Architectures": [ @@ -65596,72 +268647,53 @@ ], "instructions": [ [ - "SQDMLAL2" + "FADDP" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_lane_s16", + "name": "vpmax_f16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { + "b": { "register": "Vm.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_lane_s32", + "name": "vpmax_f32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { + "b": { "register": "Vm.2S" } }, @@ -65672,102 +268704,84 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_laneq_s16", + "name": "vpmax_s16", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_laneq_s32", + "name": "vpmax_s32", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": 
"Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_n_s16", + "name": "vpmax_s8", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.8B" } }, "Architectures": [ @@ -65777,30 +268791,26 @@ ], "instructions": [ [ - "SQDMLAL" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_n_s32", + "name": "vpmax_u16", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.4H" } }, "Architectures": [ @@ -65810,30 +268820,26 @@ ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_s16", + "name": "vpmax_u32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -65843,30 +268849,26 @@ ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlal_s32", + "name": "vpmax_u8", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t c" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -65876,34 +268878,25 @@ ], "instructions": [ [ - "SQDMLAL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_lane_s16", + "name": "vpmaxnm_f16", "arguments": [ - "int32_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4H" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { "register": "Vm.4H" } }, @@ -65912,35 +268905,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_laneq_s16", + "name": "vpmaxnm_f32", "arguments": [ - "int32_t a", - "int16_t b", - "int16x8_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -65948,30 +268932,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlalh_s16", + "name": "vpmaxnmq_f16", "arguments": [ - "int32_t a", - "int16_t b", - "int16_t c" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int32_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8H" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Vm.8H" } }, "Architectures": [ @@ -65979,35 +268959,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlals_lane_s32", + "name": "vpmaxnmq_f32", "arguments": [ - "int64_t 
a", - "int32_t b", - "int32x2_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int64_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4S" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -66015,35 +268986,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlals_laneq_s32", + "name": "vpmaxnmq_f64", "arguments": [ - "int64_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2D" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2D" } }, "Architectures": [ @@ -66051,30 +269013,45 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlals_s32", + "name": "vpmaxnmqd_f64", + "arguments": [ + "float64x2_t a" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vpmaxnms_f32", "arguments": [ - "int64_t a", - "int32_t b", - "int32_t c" + "float32x2_t a" ], "return_type": { - "value": "int64_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vn.2S" } }, "Architectures": [ @@ -66082,35 +269059,26 @@ ], "instructions": [ [ - "SQDMLAL" + "FMAXNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_lane_s16", + "name": "vpmaxq_f16", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x4_t v", - "const int lane" + "float16x8_t a", 
+ "float16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -66118,35 +269086,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_lane_s32", + "name": "vpmaxq_f32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x2_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -66154,35 +269113,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_laneq_s16", + "name": "vpmaxq_f64", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2D" }, "b": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -66190,35 +269140,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_laneq_s32", + "name": "vpmaxq_s16", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 
- }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -66226,30 +269167,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_n_s16", + "name": "vpmaxq_s32", "arguments": [ "int32x4_t a", - "int16x8_t b", - "int16_t c" + "int32x4_t b" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.4S" } }, "Architectures": [ @@ -66257,30 +269194,26 @@ ], "instructions": [ [ - "SQDMLSL2" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_n_s32", + "name": "vpmaxq_s8", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32_t c" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.16B" }, "b": { - "register": "Vn.4S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.16B" } }, "Architectures": [ @@ -66288,29 +269221,25 @@ ], "instructions": [ [ - "SQDMLSL2" + "SMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_s16", + "name": "vpmaxq_u16", "arguments": [ - "int32x4_t a", - "int16x8_t b", - "int16x8_t c" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.8H" }, - "c": { + "b": { "register": "Vm.8H" } }, @@ -66319,29 +269248,25 @@ ], "instructions": [ [ - "SQDMLSL2" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_high_s32", + "name": "vpmaxq_u32", "arguments": [ - "int64x2_t a", - "int32x4_t b", - "int32x4_t c" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.4S" }, - "c": { + "b": { "register": "Vm.4S" } }, @@ 
-66350,111 +269275,72 @@ ], "instructions": [ [ - "SQDMLSL2" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_lane_s16", + "name": "vpmaxq_u8", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t v", - "const int lane" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_lane_s32", + "name": "vpmaxqd_f64", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_laneq_s16", + "name": "vpmaxs_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x8_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ @@ -66462,66 +269348,54 @@ ], "instructions": [ [ - "SQDMLSL" + "FMAXP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_laneq_s32", + "name": "vpmin_f16", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x4_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int64x2_t" + 
"value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_n_s16", + "name": "vpmin_f32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16_t c" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.H[0]" + "register": "Vm.2S" } }, "Architectures": [ @@ -66531,30 +269405,26 @@ ], "instructions": [ [ - "SQDMLSL" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_n_s32", + "name": "vpmin_s16", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32_t c" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.S[0]" + "register": "Vm.4H" } }, "Architectures": [ @@ -66564,30 +269434,26 @@ ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_s16", + "name": "vpmin_s32", "arguments": [ - "int32x4_t a", - "int16x4_t b", - "int16x4_t c" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -66597,30 +269463,26 @@ ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsl_s32", + "name": "vpmin_s8", "arguments": [ - "int64x2_t a", - "int32x2_t b", - "int32x2_t 
c" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2S" - }, - "c": { - "register": "Vm.2S" + "register": "Vm.8B" } }, "Architectures": [ @@ -66630,138 +269492,113 @@ ], "instructions": [ [ - "SQDMLSL" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_lane_s16", + "name": "vpmin_u16", "arguments": [ - "int32_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int32_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4H" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_laneq_s16", + "name": "vpmin_u32", "arguments": [ - "int32_t a", - "int16_t b", - "int16x8_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlslh_s16", + "name": "vpmin_u8", "arguments": [ - "int32_t a", - "int16_t b", - "int16_t c" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8B" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMLSL" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vqdmlsls_lane_s32", + "name": "vpminnm_f16", "arguments": [ - "int64_t a", - "int32_t b", - "int32x2_t v", - "const int lane" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4H" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -66769,35 +269606,26 @@ ], "instructions": [ [ - "SQDMLSL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsls_laneq_s32", + "name": "vpminnm_f32", "arguments": [ - "int64_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -66805,30 +269633,26 @@ ], "instructions": [ [ - "SQDMLSL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmlsls_s32", + "name": "vpminnmq_f16", "arguments": [ - "int64_t a", - "int32_t b", - "int32_t c" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int64_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8H" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vm.8H" } }, "Architectures": [ @@ -66836,99 +269660,76 @@ ], "instructions": [ [ - "SQDMLSL" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_lane_s16", + "name": "vpminnmq_f32", "arguments": [ - "int16x4_t a", - "int16x4_t v", - "const int lane" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 
+ "register": "Vn.4S" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_lane_s32", + "name": "vpminnmq_f64", "arguments": [ - "int32x2_t a", - "int32x2_t v", - "const int lane" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.2D" }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_laneq_s16", + "name": "vpminnmqd_f64", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2D" } }, "Architectures": [ @@ -66936,31 +269737,22 @@ ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_laneq_s32", + "name": "vpminnms_f32", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" } }, "Architectures": [ @@ -66968,147 +269760,134 @@ ], "instructions": [ [ - "SQDMULH" + "FMINNMP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_n_s16", + "name": "vpminq_f16", "arguments": [ - "int16x4_t a", - "int16_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + 
"register": "Vn.8H" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_n_s32", + "name": "vpminq_f32", "arguments": [ - "int32x2_t a", - "int32_t b" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.S[0]" + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_s16", + "name": "vpminq_f64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2D" }, "b": { - "register": "Vm.4H" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulh_s32", + "name": "vpminq_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8H" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhh_lane_s16", + "name": "vpminq_s32", "arguments": [ - "int16_t a", - "int16x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ 
@@ -67116,31 +269895,26 @@ ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhh_laneq_s16", + "name": "vpminq_s8", "arguments": [ - "int16_t a", - "int16x8_t v", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.16B" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -67148,26 +269922,26 @@ ], "instructions": [ [ - "SQDMULH" + "SMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhh_s16", + "name": "vpminq_u16", "arguments": [ - "int16_t a", - "int16_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, "b": { - "register": "Hm" + "register": "Vm.8H" } }, "Architectures": [ @@ -67175,99 +269949,76 @@ ], "instructions": [ [ - "SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_lane_s16", + "name": "vpminq_u32", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_lane_s32", + "name": "vpminq_u8", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.16B" }, - "v": { - 
"register": "Vm.2S" + "b": { + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "UMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_laneq_s16", + "name": "vpminqd_f64", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "float64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2D" } }, "Architectures": [ @@ -67275,31 +270026,22 @@ ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_laneq_s32", + "name": "vpmins_f32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.2S" } }, "Architectures": [ @@ -67307,26 +270049,22 @@ ], "instructions": [ [ - "SQDMULH" + "FMINP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_n_s16", + "name": "vqabs_s16", "arguments": [ - "int16x8_t a", - "int16_t b" + "int16x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Vn.4H" } }, "Architectures": [ @@ -67336,26 +270074,22 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_n_s32", + "name": "vqabs_s32", "arguments": [ - "int32x4_t a", - "int32_t b" + "int32x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.S[0]" + "register": "Vn.2S" } }, "Architectures": [ @@ -67365,55 +270099,45 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqdmulhq_s16", + "name": "vqabs_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int64x1_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhq_s32", + "name": "vqabs_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8B" } }, "Architectures": [ @@ -67423,31 +270147,22 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_lane_s32", + "name": "vqabsb_s8", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int8_t a" ], "return_type": { - "value": "int32_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Bn" } }, "Architectures": [ @@ -67455,31 +270170,22 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_laneq_s32", + "name": "vqabsd_s64", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int64_t a" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Dn" } }, "Architectures": [ @@ -67487,26 +270193,22 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulhs_s32", + "name": "vqabsh_s16", "arguments": [ - "int32_t a", - "int32_t b" + "int16_t a" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, 
"Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Hn" } }, "Architectures": [ @@ -67514,95 +270216,72 @@ ], "instructions": [ [ - "SQDMULH" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_lane_s16", + "name": "vqabsq_s16", "arguments": [ - "int16x8_t a", - "int16x4_t v", - "const int lane" + "int16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_lane_s32", + "name": "vqabsq_s32", "arguments": [ - "int32x4_t a", - "int32x2_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_laneq_s16", + "name": "vqabsq_s64", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "int64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "register": "Vn.2D" } }, "Architectures": [ @@ -67610,58 +270289,47 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_laneq_s32", + "name": "vqabsq_s8", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - 
"v": { - "register": "Vm.4S" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_n_s16", + "name": "vqabss_s32", "arguments": [ - "int16x8_t a", - "int16_t b" + "int32_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.H[0]" + "register": "Sn" } }, "Architectures": [ @@ -67669,112 +270337,113 @@ ], "instructions": [ [ - "SQDMULL2" + "SQABS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_n_s32", + "name": "vqadd_s16", "arguments": [ - "int32x4_t a", - "int32_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" }, "b": { - "register": "Vm.S[0]" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_s16", + "name": "vqadd_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_high_s32", + "name": "vqadd_s64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" }, "b": { - "register": "Vm.4S" + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_lane_s16", + 
"name": "vqadd_s8", "arguments": [ - "int16x4_t a", - "int16x4_t v", - "const int lane" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8B" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Vm.8B" } }, "Architectures": [ @@ -67784,31 +270453,26 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_lane_s32", + "name": "vqadd_u16", "arguments": [ - "int32x2_t a", - "int32x2_t v", - "const int lane" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.4H" }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -67818,90 +270482,84 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_laneq_s16", + "name": "vqadd_u32", "arguments": [ - "int16x4_t a", - "int16x8_t v", - "const int lane" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2S" }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_laneq_s32", + "name": "vqadd_u64", "arguments": [ - "int32x2_t a", - "int32x4_t v", - "const int lane" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Dn" }, - "v": { - "register": "Vm.4S" + "b": { + "register": "Dm" } 
}, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_n_s16", + "name": "vqadd_u8", "arguments": [ - "int16x4_t a", - "int16_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8B" }, "b": { - "register": "Vm.H[0]" + "register": "Vm.8B" } }, "Architectures": [ @@ -67911,118 +270569,107 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_n_s32", + "name": "vqaddb_s8", "arguments": [ - "int32x2_t a", - "int32_t b" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Bn" }, "b": { - "register": "Vm.S[0]" + "register": "Bm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_s16", + "name": "vqaddb_u8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint8_t a", + "uint8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Bn" }, "b": { - "register": "Vm.4H" + "register": "Bm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmull_s32", + "name": "vqaddd_s64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_lane_s16", + "name": "vqaddd_u64", "arguments": [ - "int16_t a", - "int16x4_t v", - 
"const int lane" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "int32_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Dn" }, - "v": { - "register": "Vm.4H" + "b": { + "register": "Dm" } }, "Architectures": [ @@ -68030,31 +270677,26 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_laneq_s16", + "name": "vqaddh_s16", "arguments": [ "int16_t a", - "int16x8_t v", - "const int lane" + "int16_t b" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { "register": "Hn" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + "b": { + "register": "Hm" } }, "Architectures": [ @@ -68062,19 +270704,19 @@ ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmullh_s16", + "name": "vqaddh_u16", "arguments": [ - "int16_t a", - "int16_t b" + "uint16_t a", + "uint16_t b" ], "return_type": { - "value": "int32_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { @@ -68089,252 +270731,258 @@ ], "instructions": [ [ - "SQDMULL" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_lane_s32", + "name": "vqaddq_s16", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.8H" }, - "v": { - "register": "Vm.2S" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_laneq_s32", + "name": "vqaddq_s32", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int32x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4S" }, - "v": { + "b": { "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqdmulls_s32", + "name": "vqaddq_s64", "arguments": [ - "int32_t a", - "int32_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vn.2D" }, "b": { - "register": "Sm" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQDMULL" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s16", + "name": "vqaddq_s8", "arguments": [ - "int8x8_t r", - "int16x8_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s32", + "name": "vqaddq_u16", "arguments": [ - "int16x4_t r", - "int32x4_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_s64", + "name": "vqaddq_u32", "arguments": [ - "int32x2_t r", - "int64x2_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vm.4S" } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u16", + "name": "vqaddq_u64", "arguments": [ - "uint8x8_t r", - "uint16x8_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, - "r": { - "register": "Vd.8B" + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u32", + "name": "vqaddq_u8", "arguments": [ - "uint16x4_t r", - "uint32x4_t a" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN2" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_high_u64", + "name": "vqadds_s32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sn" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Sm" } }, "Architectures": [ @@ -68342,172 +270990,232 @@ ], "instructions": [ [ - "UQXTN2" + "SQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s16", + "name": "vqadds_u32", "arguments": [ - "int16x8_t a" + "uint32_t a", + "uint32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQXTN" + "UQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s32", + "name": "vqdmlal_high_lane_s16", "arguments": [ 
- "int32x4_t a" + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_s64", + "name": "vqdmlal_high_lane_s32", "arguments": [ - "int64x2_t a" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u16", + "name": "vqdmlal_high_laneq_s16", "arguments": [ - "uint16x8_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4S" + }, + "b": { "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u32", + "name": "vqdmlal_high_laneq_s32", "arguments": [ - "uint32x4_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2D" + }, + "b": { "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovn_u64", + "name": "vqdmlal_high_n_s16", "arguments": [ - "uint64x2_t a" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnd_s64", + "name": "vqdmlal_high_n_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -68515,22 +271223,30 @@ ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnd_u64", + "name": "vqdmlal_high_s16", "arguments": [ - "uint64_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint32_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68538,22 +271254,30 @@ ], "instructions": [ [ - "UQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovnh_s16", + "name": "vqdmlal_high_s32", "arguments": [ - "int16_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "int8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68561,68 +271285,111 @@ ], "instructions": [ [ - "SQXTN" + "SQDMLAL2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqmovnh_u16", + "name": "vqdmlal_lane_s16", "arguments": [ - "uint16_t a" + "int32x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQXTN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovns_s32", + "name": "vqdmlal_lane_s32", "arguments": [ - "int32_t a" + "int64x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovns_u32", + "name": "vqdmlal_laneq_s16", "arguments": [ - "uint32_t a" + "int32x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68630,26 +271397,35 @@ ], "instructions": [ [ - "UQXTN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s16", + "name": "vqdmlal_laneq_s32", "arguments": [ - "uint8x8_t r", - "int16x8_t a" + "int64x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" }, - "r": { - "register": "Vd.8B" + "b": { + "register": 
"Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68657,76 +271433,96 @@ ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s32", + "name": "vqdmlal_n_s16", "arguments": [ - "uint16x4_t r", - "int32x4_t a" + "int32x4_t a", + "int16x4_t b", + "int16_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "r": { - "register": "Vd.4H" + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_high_s64", + "name": "vqdmlal_n_s32", "arguments": [ - "uint32x2_t r", - "int64x2_t a" + "int64x2_t a", + "int32x2_t b", + "int32_t c" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" }, - "r": { - "register": "Vd.2S" + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQXTUN2" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s16", + "name": "vqdmlal_s16", "arguments": [ - "int16x8_t a" + "int32x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ @@ -68736,22 +271532,30 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s32", + "name": "vqdmlal_s32", "arguments": [ - "int32x4_t a" + "int64x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.4S" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ @@ -68761,47 +271565,71 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovun_s64", + "name": "vqdmlalh_lane_s16", "arguments": [ - "int64x2_t a" + "int32_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sd" + }, + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovund_s64", + "name": "vqdmlalh_laneq_s16", "arguments": [ - "int64_t a" + "int32_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Sd" + }, + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68809,22 +271637,30 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovunh_s16", + "name": "vqdmlalh_s16", "arguments": [ - "int16_t a" + "int32_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "uint8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { + "register": "Sd" + }, + "b": { "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ @@ -68832,22 +271668,35 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqmovuns_s32", + "name": "vqdmlals_lane_s32", "arguments": [ - "int32_t a" + "int64_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { + "register": 
"Dd" + }, + "b": { "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -68855,72 +271704,102 @@ ], "instructions": [ [ - "SQXTUN" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s16", + "name": "vqdmlals_laneq_s32", "arguments": [ - "int16x4_t a" + "int64_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s32", + "name": "vqdmlals_s32", "arguments": [ - "int32x2_t a" + "int64_t a", + "int32_t b", + "int32_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLAL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s64", + "name": "vqdmlsl_high_lane_s16", "arguments": [ - "int64x1_t a" + "int32x4_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -68928,47 +271807,71 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqneg_s8", + "name": "vqdmlsl_high_lane_s32", "arguments": [ - "int8x8_t a" + "int64x2_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": 
"int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegb_s8", + "name": "vqdmlsl_high_laneq_s16", "arguments": [ - "int8_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -68976,22 +271879,35 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegd_s64", + "name": "vqdmlsl_high_laneq_s32", "arguments": [ - "int64_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -68999,22 +271915,30 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegh_s16", + "name": "vqdmlsl_high_n_s16", "arguments": [ - "int16_t a" + "int32x4_t a", + "int16x8_t b", + "int16_t c" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -69022,72 +271946,92 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s16", + "name": "vqdmlsl_high_n_s32", "arguments": [ - "int16x8_t a" + 
"int64x2_t a", + "int32x4_t b", + "int32_t c" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s32", + "name": "vqdmlsl_high_s16", "arguments": [ - "int32x4_t a" + "int32x4_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s64", + "name": "vqdmlsl_high_s32", "arguments": [ - "int64x2_t a" + "int64x2_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -69095,22 +272039,35 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegq_s8", + "name": "vqdmlsl_lane_s16", "arguments": [ - "int8x16_t a" + "int32x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -69120,58 +272077,73 @@ ], "instructions": [ [ - "SQNEG" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqnegs_s32", + "name": "vqdmlsl_lane_s32", "arguments": [ - "int32_t a" + "int64x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], 
"return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQNEG" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_lane_s16", + "name": "vqdmlsl_laneq_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b", - "int16x4_t v", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": "Vn.4H" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "v": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -69179,35 +272151,35 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_lane_s32", + "name": "vqdmlsl_laneq_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", - "int32x2_t v", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -69215,96 +272187,90 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_laneq_s16", + "name": "vqdmlsl_n_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b", - "int16x8_t v", - "const int lane" + "int16_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": "Vn.4H" }, - "lane": { - "minimum": 0, - "maximum": 7 - }, - "v": { - "register": "Vm.8H" + 
"c": { + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_laneq_s32", + "name": "vqdmlsl_n_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", - "int32x4_t v", - "const int lane" + "int32_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "c": { + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_s16", + "name": "vqdmlsl_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b", "int16x4_t c" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.4S" }, "b": { "register": "Vn.4H" @@ -69314,28 +272280,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlah_s32", + "name": "vqdmlsl_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b", "int32x2_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.2D" }, "b": { "register": "Vn.2S" @@ -69345,29 +272313,31 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_lane_s16", + "name": "vqdmlslh_lane_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69385,25 +272355,25 
@@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_laneq_s16", + "name": "vqdmlslh_laneq_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69421,24 +272391,24 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahh_s16", + "name": "vqdmlslh_s16", "arguments": [ - "int16_t a", + "int32_t a", "int16_t b", "int16_t c" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sd" }, "b": { "register": "Hn" @@ -69452,28 +272422,127 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMLSL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_lane_s16", + "name": "vqdmlsls_lane_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x4_t v", + "int64_t a", + "int32_t b", + "int32x2_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dd" }, "b": { - "register": "Vn.8H" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsls_laneq_s32", + "arguments": [ + "int64_t a", + "int32_t b", + "int32x4_t v", + "const int lane" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmlsls_s32", + "arguments": [ + "int64_t a", + 
"int32_t b", + "int32_t c" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulh_lane_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t v", + "const int lane" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" }, "lane": { "minimum": 0, @@ -69484,32 +272553,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_lane_s32", + "name": "vqdmulh_lane_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int32x2_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "register": "Vn.2S" }, "lane": { "minimum": 0, @@ -69520,31 +272587,29 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_laneq_s16", + "name": "vqdmulh_laneq_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", + "int16x4_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "lane": { @@ -69560,27 +272625,23 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_laneq_s32", + "name": "vqdmulh_laneq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int32x2_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "lane": 
{ @@ -69596,97 +272657,147 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_s16", + "name": "vqdmulh_n_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4H" }, "b": { - "register": "Vn.8H" - }, - "c": { - "register": "Vm.8H" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahq_s32", + "name": "vqdmulh_n_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2S" }, "b": { - "register": "Vn.4S" + "register": "Vm.S[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulh_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" }, - "c": { - "register": "Vm.4S" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_lane_s32", + "name": "vqdmulh_s32", "arguments": [ - "int32_t a", - "int32_t b", - "int32x2_t v", - "const int lane" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.2S" }, "b": { - "register": "Sn" + "register": "Vm.2S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": 
"vqdmulhh_lane_s16", + "arguments": [ + "int16_t a", + "int16x4_t v", + "const int lane" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4H" } }, "Architectures": [ @@ -69694,35 +272805,31 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_laneq_s32", + "name": "vqdmulhh_laneq_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32x4_t v", + "int16_t a", + "int16x8_t v", "const int lane" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" - }, - "b": { - "register": "Sn" + "register": "Hn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -69730,30 +272837,26 @@ ], "instructions": [ [ - "SQRDMLAH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlahs_s32", + "name": "vqdmulhh_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32_t c" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Hn" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Hm" } }, "Architectures": [ @@ -69761,28 +272864,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_lane_s16", + "name": "vqdmulhq_lane_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "int16x8_t a", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -69793,32 +272892,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" 
] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_lane_s32", + "name": "vqdmulhq_lane_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int32x4_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -69829,32 +272926,30 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_laneq_s16", + "name": "vqdmulhq_laneq_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "int16x8_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -69869,28 +272964,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_laneq_s32", + "name": "vqdmulhq_laneq_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int32x4_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -69905,97 +272996,147 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlsh_s16", + "name": "vqdmulhq_n_s16", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "int16x4_t c" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" }, "b": { - "register": "Vn.4H" - }, - "c": { - "register": "Vm.4H" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqrdmlsh_s32", + "name": "vqdmulhq_n_s32", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "int32x2_t c" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" }, "b": { - "register": "Vn.2S" + "register": "Vm.S[0]" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulhq_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" }, - "c": { - "register": "Vm.2S" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_lane_s16", + "name": "vqdmulhq_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16x4_t v", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.4S" }, "b": { - "register": "Hn" + "register": "Vm.4S" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqdmulhs_lane_s32", + "arguments": [ + "int32_t a", + "int32x2_t v", + "const int lane" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ @@ -70003,35 +273144,31 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_laneq_s16", + "name": "vqdmulhs_laneq_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16x8_t v", + "int32_t a", + "int32x4_t v", "const int 
lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" - }, - "b": { - "register": "Hn" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "v": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -70039,30 +273176,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshh_s16", + "name": "vqdmulhs_s32", "arguments": [ - "int16_t a", - "int16_t b", - "int16_t c" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Sn" }, "b": { - "register": "Hn" - }, - "c": { - "register": "Hm" + "register": "Sm" } }, "Architectures": [ @@ -70070,27 +273203,23 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_lane_s16", + "name": "vqdmull_high_lane_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, "lane": { @@ -70106,27 +273235,23 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_lane_s32", + "name": "vqdmull_high_lane_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, "lane": { @@ -70142,28 +273267,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_laneq_s16", + "name": "vqdmull_high_laneq_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.8H" }, "lane": { "minimum": 0, @@ -70178,28 +273299,24 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_laneq_s32", + "name": "vqdmull_high_laneq_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.4S" }, "lane": { "minimum": 0, @@ -70214,30 +273331,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_s16", + "name": "vqdmull_high_n_s16", "arguments": [ "int16x8_t a", - "int16x8_t b", - "int16x8_t c" + "int16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, - "c": { - "register": "Vm.8H" + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -70245,66 +273358,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshq_s32", + "name": "vqdmull_high_n_s32", "arguments": [ "int32x4_t a", - "int32x4_t b", - "int32x4_t c" + "int32_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, - "c": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "SQRDMLSH" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vqrdmlshs_lane_s32", - "arguments": [ - "int32_t a", - "int32_t b", - "int32x2_t v", - "const int lane" - ], - "return_type": { - "value": "int32_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Sd" - }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vm.S[0]" } }, "Architectures": [ 
@@ -70312,35 +273385,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshs_laneq_s32", + "name": "vqdmull_high_s16", "arguments": [ - "int32_t a", - "int32_t b", - "int32x4_t v", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8H" }, "b": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -70348,30 +273412,26 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmlshs_s32", + "name": "vqdmull_high_s32", "arguments": [ - "int32_t a", - "int32_t b", - "int32_t c" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.4S" }, "b": { - "register": "Sn" - }, - "c": { - "register": "Sm" + "register": "Vm.4S" } }, "Architectures": [ @@ -70379,20 +273439,20 @@ ], "instructions": [ [ - "SQRDMLSH" + "SQDMULL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_lane_s16", + "name": "vqdmull_lane_s16", "arguments": [ "int16x4_t a", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70413,20 +273473,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_lane_s32", + "name": "vqdmull_lane_s32", "arguments": [ "int32x2_t a", "int32x2_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70447,20 +273507,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_laneq_s16", + "name": "vqdmull_laneq_s16", "arguments": [ "int16x4_t a", "int16x8_t v", "const int lane" ], "return_type": { - 
"value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70479,20 +273539,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_laneq_s32", + "name": "vqdmull_laneq_s32", "arguments": [ "int32x2_t a", "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70511,19 +273571,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_n_s16", + "name": "vqdmull_n_s16", "arguments": [ "int16x4_t a", "int16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70540,19 +273600,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_n_s32", + "name": "vqdmull_n_s32", "arguments": [ "int32x2_t a", "int32_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70569,19 +273629,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_s16", + "name": "vqdmull_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -70598,19 +273658,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulh_s32", + "name": "vqdmull_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -70627,20 +273687,20 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_lane_s16", + "name": "vqdmullh_lane_s16", "arguments": [ "int16_t a", "int16x4_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70659,20 +273719,20 @@ ], "instructions": [ 
[ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_laneq_s16", + "name": "vqdmullh_laneq_s16", "arguments": [ "int16_t a", "int16x8_t v", "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70691,19 +273751,19 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhh_s16", + "name": "vqdmullh_s16", "arguments": [ "int16_t a", "int16_t b" ], "return_type": { - "value": "int16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { @@ -70718,99 +273778,117 @@ ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_lane_s16", + "name": "vqdmulls_lane_s32", "arguments": [ - "int16x8_t a", - "int16x4_t v", + "int32_t a", + "int32x2_t v", "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "v": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_lane_s32", + "name": "vqdmulls_laneq_s32", "arguments": [ - "int32x4_t a", - "int32x2_t v", + "int32_t a", + "int32x4_t v", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "v": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQDMULL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_laneq_s16", + "name": "vqdmulls_s32", "arguments": [ - "int16x8_t a", - "int16x8_t v", - "const int lane" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int64_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sn" }, - "lane": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqmovn_high_s16", + "arguments": [ + "int8x8_t r", + "int16x8_t a" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" }, - "v": { - "register": "Vm.8H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -70818,31 +273896,53 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_laneq_s32", + "name": "vqmovn_high_s32", "arguments": [ - "int32x4_t a", - "int32x4_t v", - "const int lane" + "int16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "r": { + "register": "Vd.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqmovn_high_s64", + "arguments": [ + "int32x2_t r", + "int64x2_t a" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" }, - "v": { - "register": "Vm.4S" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -70850,113 +273950,103 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_n_s16", + "name": "vqmovn_high_u16", "arguments": [ - "int16x8_t a", - "int16_t b" + "uint8x8_t r", + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, - "b": { - "register": "Vm.H[0]" + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_n_s32", + 
"name": "vqmovn_high_u32", "arguments": [ - "int32x4_t a", - "int32_t b" + "uint16x4_t r", + "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, - "b": { - "register": "Vm.S[0]" + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_s16", + "name": "vqmovn_high_u64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint32x2_t r", + "uint64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, - "b": { - "register": "Vm.8H" + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhq_s32", + "name": "vqmovn_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.8H" } }, "Architectures": [ @@ -70966,117 +274056,97 @@ ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhs_lane_s32", + "name": "vqmovn_s32", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int32x4_t a" ], "return_type": { - "value": "int32_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vm.2S" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhs_laneq_s32", + "name": "vqmovn_s64", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int64x2_t a" ], "return_type": { - "value": "int32_t" + "value": 
"int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "v": { - "register": "Vm.4S" + "register": "Vn.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrdmulhs_s32", + "name": "vqmovn_u16", "arguments": [ - "int32_t a", - "int32_t b" + "uint16x8_t a" ], "return_type": { - "value": "int32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRDMULH" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s16", + "name": "vqmovn_u32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint32x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vn.4S" } }, "Architectures": [ @@ -71086,26 +274156,22 @@ ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s32", + "name": "vqmovn_u64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint64x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vn.2D" } }, "Architectures": [ @@ -71115,200 +274181,164 @@ ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s64", + "name": "vqmovnd_s64", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int64_t a" ], "return_type": { - "value": "int64x1_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { "register": "Dn" - }, - "b": { - "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_s8", + "name": "vqmovnd_u64", "arguments": [ - 
"int8x8_t a", - "int8x8_t b" + "uint64_t a" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u16", + "name": "vqmovnh_s16", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u32", + "name": "vqmovnh_u16", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "uint16_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u64", + "name": "vqmovns_s32", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int32_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshl_u8", + "name": "vqmovns_u32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "uint32_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "UQXTN" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqrshlb_s8", + "name": "vqmovun_high_s16", "arguments": [ - "int8_t a", - "int8_t b" + "uint8x8_t r", + "int16x8_t a" ], "return_type": { - "value": "int8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.8H" }, - "b": { - "register": "Bm" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -71316,26 +274346,26 @@ ], "instructions": [ [ - "SQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlb_u8", + "name": "vqmovun_high_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "uint16x4_t r", + "int32x4_t a" ], "return_type": { - "value": "uint8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.4S" }, - "b": { - "register": "Bm" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -71343,26 +274373,26 @@ ], "instructions": [ [ - "UQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshld_s64", + "name": "vqmovun_high_s64", "arguments": [ - "int64_t a", - "int64_t b" + "uint32x2_t r", + "int64x2_t a" ], "return_type": { - "value": "int64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2D" }, - "b": { - "register": "Dm" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -71370,194 +274400,166 @@ ], "instructions": [ [ - "SQRSHL" + "SQXTUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshld_u64", + "name": "vqmovun_s16", "arguments": [ - "uint64_t a", - "int64_t b" + "int16x8_t a" ], "return_type": { - "value": "uint64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlh_s16", + "name": "vqmovun_s32", "arguments": [ - "int16_t a", - "int16_t b" + "int32x4_t a" ], "return_type": { - "value": "int16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlh_u16", + "name": "vqmovun_s64", "arguments": [ - "uint16_t a", - "int16_t b" + "int64x2_t a" ], "return_type": { - "value": "uint16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s16", + "name": "vqmovund_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int64_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s32", + "name": "vqmovunh_s16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s64", + "name": "vqmovuns_s32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int32_t a" ], "return_type": { - "value": "int64x2_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHL" + "SQXTUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_s8", + "name": "vqneg_s16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int16x4_t a" ], "return_type": { - "value": "int8x16_t" + 
"value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn.4H" } }, "Architectures": [ @@ -71567,26 +274569,22 @@ ], "instructions": [ [ - "SQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u16", + "name": "vqneg_s32", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "int32x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.2S" } }, "Architectures": [ @@ -71596,55 +274594,45 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u32", + "name": "vqneg_s64", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "int64x1_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u64", + "name": "vqneg_s8", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "int8x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8B" } }, "Architectures": [ @@ -71654,55 +274642,45 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshlq_u8", + "name": "vqnegb_s8", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "int8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Bn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshls_s32", + "name": "vqnegd_s64", "arguments": [ - 
"int32_t a", - "int32_t b" + "int64_t a" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Dn" } }, "Architectures": [ @@ -71710,26 +274688,22 @@ ], "instructions": [ [ - "SQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshls_u32", + "name": "vqnegh_s16", "arguments": [ - "uint32_t a", - "int32_t b" + "int16_t a" ], "return_type": { - "value": "uint32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Hn" } }, "Architectures": [ @@ -71737,95 +274711,72 @@ ], "instructions": [ [ - "UQRSHL" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s16", + "name": "vqnegq_s16", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "int16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s32", + "name": "vqnegq_s32", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const int n" + "int32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_s64", + "name": "vqnegq_s64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "int64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": 
"Vd.2S" } }, "Architectures": [ @@ -71833,63 +274784,47 @@ ], "instructions": [ [ - "SQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u16", + "name": "vqnegq_s8", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "int8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u32", + "name": "vqnegs_s32", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "int32_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "register": "Sn" } }, "Architectures": [ @@ -71897,31 +274832,35 @@ ], "instructions": [ [ - "UQRSHRN2" + "SQNEG" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_high_n_u64", + "name": "vqrdmlah_lane_s16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.4H" }, - "r": { - "register": "Vd.2S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -71929,207 +274868,241 @@ ], "instructions": [ [ - "UQRSHRN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s16", + "name": "vqrdmlah_lane_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + 
"value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s32", + "name": "vqrdmlah_laneq_s16", "arguments": [ - "int32x4_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_s64", + "name": "vqrdmlah_laneq_s32", "arguments": [ - "int64x2_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u16", + "name": "vqrdmlah_s16", "arguments": [ - "uint16x8_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t c" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.4H" + }, + "c": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", 
"A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u32", + "name": "vqrdmlah_s32", "arguments": [ - "uint32x4_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "c": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrn_n_u64", + "name": "vqrdmlahh_lane_s16", "arguments": [ - "uint64x2_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnd_n_s64", + "name": "vqrdmlahh_laneq_s16", "arguments": [ - "int64_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int32_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72137,27 +275110,30 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnd_n_u64", + "name": "vqrdmlahh_s16", "arguments": [ - "uint64_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "uint32_t" + 
"value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ @@ -72165,27 +275141,35 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnh_n_s16", + "name": "vqrdmlahq_lane_s16", "arguments": [ - "int16_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.8H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -72193,27 +275177,35 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrnh_n_u16", + "name": "vqrdmlahq_lane_s32", "arguments": [ - "uint16_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.4S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72221,27 +275213,35 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrns_n_s32", + "name": "vqrdmlahq_laneq_s16", "arguments": [ - "int32_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + 
"register": "Vm.8H" } }, "Architectures": [ @@ -72249,27 +275249,35 @@ ], "instructions": [ [ - "SQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrns_n_u32", + "name": "vqrdmlahq_laneq_s32", "arguments": [ - "uint32_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -72277,31 +275285,30 @@ ], "instructions": [ [ - "UQRSHRN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_high_n_s16", + "name": "vqrdmlahq_s16", "arguments": [ - "uint8x8_t r", "int16x8_t a", - "const int n" + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.8H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8B" + "c": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72309,31 +275316,30 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_high_n_s32", + "name": "vqrdmlahq_s32", "arguments": [ - "uint16x4_t r", "int32x4_t a", - "const int n" + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.4H" + "c": { + "register": "Vm.4S" } }, "Architectures": [ @@ -72341,31 +275347,35 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_high_n_s64", + "name": "vqrdmlahs_lane_s32", "arguments": [ - "uint32x2_t 
r", - "int64x2_t a", - "const int n" + "int32_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Sn" }, - "r": { - "register": "Vd.2S" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72373,117 +275383,138 @@ ], "instructions": [ [ - "SQRSHRUN2" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s16", + "name": "vqrdmlahs_laneq_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLAH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s32", - "arguments": [ - "int32x4_t a", - "const int n" + "name": "vqrdmlahs_s32", + "arguments": [ + "int32_t a", + "int32_t b", + "int32_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sd" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrun_n_s64", + "name": "vqrdmlsh_lane_s16", "arguments": [ - "int64x2_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.2D" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrund_n_s64", + "name": "vqrdmlsh_lane_s32", "arguments": [ - "int64_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 32 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -72491,27 +275522,35 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshrunh_n_s16", + "name": "vqrdmlsh_laneq_s16", "arguments": [ - "int16_t a", - "const int n" + "int16x4_t a", + "int16x4_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.4H" }, - "n": { - "minimum": 1, - "maximum": 8 + "b": { + "register": "Vn.4H" + }, + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -72519,27 +275558,35 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqrshruns_n_s32", + "name": "vqrdmlsh_laneq_s32", "arguments": [ - "int32_t a", - "const int n" + "int32x2_t a", + "int32x2_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.2S" }, - "n": { - "minimum": 1, - "maximum": 16 + "b": { + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": 
"Vm.4S" } }, "Architectures": [ @@ -72547,440 +275594,505 @@ ], "instructions": [ [ - "SQRSHRUN" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s16", + "name": "vqrdmlsh_s16", "arguments": [ "int16x4_t a", - "const int n" + "int16x4_t b", + "int16x4_t c" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4H" + }, + "b": { "register": "Vn.4H" }, - "n": { - "minimum": 0, - "maximum": 15 + "c": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s32", + "name": "vqrdmlsh_s32", "arguments": [ "int32x2_t a", - "const int n" + "int32x2_t b", + "int32x2_t c" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2S" + }, + "b": { "register": "Vn.2S" }, - "n": { - "minimum": 0, - "maximum": 31 + "c": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s64", + "name": "vqrdmlshh_lane_s16", "arguments": [ - "int64x1_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hd" }, - "n": { + "b": { + "register": "Hn" + }, + "lane": { "minimum": 0, - "maximum": 63 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_s8", + "name": "vqrdmlshh_laneq_s16", "arguments": [ - "int8x8_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Hd" }, - "n": { + "b": { + "register": "Hn" + }, + "lane": { 
"minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u16", + "name": "vqrdmlshh_s16", "arguments": [ - "uint16x4_t a", - "const int n" + "int16_t a", + "int16_t b", + "int16_t c" ], "return_type": { - "value": "uint16x4_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Hd" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hn" + }, + "c": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u32", + "name": "vqrdmlshq_lane_s16", "arguments": [ - "uint32x2_t a", - "const int n" + "int16x8_t a", + "int16x8_t b", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, - "n": { + "b": { + "register": "Vn.8H" + }, + "lane": { "minimum": 0, - "maximum": 31 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u64", + "name": "vqrdmlshq_lane_s32", "arguments": [ - "uint64x1_t a", - "const int n" + "int32x4_t a", + "int32x4_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, - "n": { + "b": { + "register": "Vn.4S" + }, + "lane": { "minimum": 0, - "maximum": 63 + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_n_u8", + "name": "vqrdmlshq_laneq_s16", "arguments": [ - "uint8x8_t a", - "const int n" + "int16x8_t a", + 
"int16x8_t b", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.4H" }, - "n": { + "b": { + "register": "Vn.4H" + }, + "lane": { "minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s16", + "name": "vqrdmlshq_laneq_s32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2S" }, "b": { - "register": "Vm.4H" + "register": "Vn.2S" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s32", + "name": "vqrdmlshq_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int16x8_t a", + "int16x8_t b", + "int16x8_t c" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.8H" }, "b": { - "register": "Vm.2S" + "register": "Vn.8H" + }, + "c": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s64", + "name": "vqrdmlshq_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int32x4_t a", + "int32x4_t b", + "int32x4_t c" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Dm" + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + 
"SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_s8", + "name": "vqrdmlshs_lane_s32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int32_t a", + "int32_t b", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sd" }, "b": { - "register": "Vm.8B" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u16", + "name": "vqrdmlshs_laneq_s32", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int32_t a", + "int32_t b", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Sd" }, "b": { - "register": "Vm.4H" + "register": "Sn" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u32", + "name": "vqrdmlshs_s32", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "int32_t a", + "int32_t b", + "int32_t c" ], "return_type": { - "value": "uint32x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Sd" }, "b": { - "register": "Vm.2S" + "register": "Sn" + }, + "c": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMLSH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u64", + "name": "vqrdmulh_lane_s16", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int16x4_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "b": { - "register": "Dm" 
+ "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ @@ -72990,26 +276102,31 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshl_u8", + "name": "vqrdmulh_lane_s32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int32x2_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2S" }, - "b": { - "register": "Vm.8B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ @@ -73019,27 +276136,31 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_n_s8", + "name": "vqrdmulh_laneq_s16", "arguments": [ - "int8_t a", - "const int n" + "int16x4_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "int8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.4H" }, - "n": { + "lane": { "minimum": 0, "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73047,27 +276168,31 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_n_u8", + "name": "vqrdmulh_laneq_s32", "arguments": [ - "uint8_t a", - "const int n" + "int32x2_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2S" }, - "n": { + "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ @@ -73075,136 +276200,147 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_s8", + "name": "vqrdmulh_n_s16", "arguments": [ - "int8_t a", - "int8_t b" + "int16x4_t a", + "int16_t b" ], "return_type": { - "value": "int8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { 
"a": { - "register": "Bn" + "register": "Vn.4H" }, "b": { - "register": "Bm" + "register": "Vm.H[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlb_u8", + "name": "vqrdmulh_n_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "int32x2_t a", + "int32_t b" ], "return_type": { - "value": "uint8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2S" }, "b": { - "register": "Bm" + "register": "Vm.S[0]" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_n_s64", + "name": "vqrdmulh_s16", "arguments": [ - "int64_t a", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_n_u64", + "name": "vqrdmulh_s32", "arguments": [ - "uint64_t a", - "const int n" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2S" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_s64", + "name": "vqrdmulhh_lane_s16", "arguments": [ - "int64_t a", - "int64_t b" + "int16_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hn" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": 
"Vm.4H" } }, "Architectures": [ @@ -73212,26 +276348,31 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshld_u64", + "name": "vqrdmulhh_laneq_s16", "arguments": [ - "uint64_t a", - "int64_t b" + "int16_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint64_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Hn" }, - "b": { - "register": "Dm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73239,16 +276380,16 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_n_s16", + "name": "vqrdmulhh_s16", "arguments": [ "int16_t a", - "const int n" + "int16_t b" ], "return_type": { "value": "int16_t" @@ -73257,9 +276398,8 @@ "a": { "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hm" } }, "Architectures": [ @@ -73267,81 +276407,99 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_n_u16", + "name": "vqrdmulhq_lane_s16", "arguments": [ - "uint16_t a", - "const int n" + "int16x8_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, - "n": { + "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 + }, + "v": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_s16", + "name": "vqrdmulhq_lane_s32", "arguments": [ - "int16_t a", - "int16_t b" + "int32x4_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "int16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.4S" }, - "b": { - "register": "Hm" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlh_u16", + "name": "vqrdmulhq_laneq_s16", "arguments": [ - "uint16_t a", - "int16_t b" + "int16x8_t a", + "int16x8_t v", + "const int lane" ], "return_type": { - "value": "uint16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, - "b": { - "register": "Hm" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73349,57 +276507,58 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s16", + "name": "vqrdmulhq_laneq_s32", "arguments": [ - "int16x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { + "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s32", + "name": "vqrdmulhq_n_s16", "arguments": [ - "int32x4_t a", - "const int n" + "int16x8_t a", + "int16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.H[0]" } }, "Architectures": [ @@ -73409,27 +276568,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s64", + "name": "vqrdmulhq_n_s32", "arguments": [ - "int64x2_t a", - "const int n" + "int32x4_t a", + "int32_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 63 + 
"b": { + "register": "Vm.S[0]" } }, "Architectures": [ @@ -73439,27 +276597,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_s8", + "name": "vqrdmulhq_s16", "arguments": [ - "int8x16_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -73469,27 +276626,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u16", + "name": "vqrdmulhq_s32", "arguments": [ - "uint16x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -73499,116 +276655,117 @@ ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u32", + "name": "vqrdmulhs_lane_s32", "arguments": [ - "uint32x4_t a", - "const int n" + "int32_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Sn" }, - "n": { + "lane": { "minimum": 0, - "maximum": 31 + "maximum": 1 + }, + "v": { + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u64", + "name": "vqrdmulhs_laneq_s32", "arguments": [ - "uint64x2_t a", - "const int n" + "int32_t a", + "int32x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Sn" }, - "n": { + "lane": { "minimum": 0, - 
"maximum": 63 + "maximum": 3 + }, + "v": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_n_u8", + "name": "vqrdmulhs_s32", "arguments": [ - "uint8x16_t a", - "const int n" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UQSHL" + "SQRDMULH" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s16", + "name": "vqrshl_s16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -73618,26 +276775,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s32", + "name": "vqrshl_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -73647,26 +276804,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_s64", + "name": "vqrshl_s64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" + "register": "Dm" } }, "Architectures": [ @@ -73676,26 +276833,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vqshlq_s8", + "name": "vqrshl_s8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ @@ -73705,26 +276862,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u16", + "name": "vqrshl_u16", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -73734,26 +276891,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u32", + "name": "vqrshl_u32", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ @@ -73763,26 +276920,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u64", + "name": "vqrshl_u64", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" + "register": "Dm" } }, "Architectures": [ @@ -73792,26 +276949,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlq_u8", + "name": "vqrshl_u8", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": 
"uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ @@ -73821,27 +276978,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_n_s32", + "name": "vqrshlb_s8", "arguments": [ - "int32_t a", - "const int n" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int32_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Bn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Bm" } }, "Architectures": [ @@ -73849,27 +277005,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_n_u32", + "name": "vqrshlb_u8", "arguments": [ - "uint32_t a", - "const int n" + "uint8_t a", + "int8_t b" ], "return_type": { - "value": "uint32_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Bn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Bm" } }, "Architectures": [ @@ -73877,26 +277032,26 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_s32", + "name": "vqrshld_s64", "arguments": [ - "int32_t a", - "int32_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int32_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Dn" }, "b": { - "register": "Sm" + "register": "Dm" } }, "Architectures": [ @@ -73904,26 +277059,26 @@ ], "instructions": [ [ - "SQSHL" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshls_u32", + "name": "vqrshld_u64", "arguments": [ - "uint32_t a", - "int32_t b" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint32_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Dn" }, "b": { - "register": "Sm" + "register": "Dm" } }, "Architectures": [ @@ -73931,87 
+277086,80 @@ ], "instructions": [ [ - "UQSHL" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s16", + "name": "vqrshlh_s16", "arguments": [ - "int16x4_t a", - "const int n" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s32", + "name": "vqrshlh_u16", "arguments": [ - "int32x2_t a", - "const int n" + "uint16_t a", + "int16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Hn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Hm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s64", + "name": "vqrshlq_s16", "arguments": [ - "int64x1_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -74021,27 +277169,26 @@ ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlu_n_s8", + "name": "vqrshlq_s32", "arguments": [ - "int8x8_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -74051,111 +277198,113 @@ ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlub_n_s8", + 
"name": "vqrshlq_s64", "arguments": [ - "int8_t a", - "const int n" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bn" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlud_n_s64", + "name": "vqrshlq_s8", "arguments": [ - "int64_t a", - "const int n" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluh_n_s16", + "name": "vqrshlq_u16", "arguments": [ - "int16_t a", - "const int n" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s16", + "name": "vqrshlq_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -74165,27 +277314,26 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s32", + "name": "vqrshlq_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64x2_t a", + "int64x2_t b" 
], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -74195,27 +277343,26 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s64", + "name": "vqrshlq_u8", "arguments": [ - "int64x2_t a", - "const int n" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -74225,46 +277372,43 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshluq_n_s8", + "name": "vqrshls_s32", "arguments": [ - "int8x16_t a", - "const int n" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SQSHLU" + "SQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshlus_n_s32", + "name": "vqrshls_u32", "arguments": [ - "int32_t a", - "const int n" + "uint32_t a", + "int32_t b" ], "return_type": { "value": "uint32_t" @@ -74273,9 +277417,8 @@ "a": { "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Sm" } }, "Architectures": [ @@ -74283,13 +277426,13 @@ ], "instructions": [ [ - "SQSHLU" + "UQRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s16", + "name": "vqrshrn_high_n_s16", "arguments": [ "int8x8_t r", "int16x8_t a", @@ -74315,13 +277458,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s32", + "name": "vqrshrn_high_n_s32", "arguments": [ 
"int16x4_t r", "int32x4_t a", @@ -74347,13 +277490,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_s64", + "name": "vqrshrn_high_n_s64", "arguments": [ "int32x2_t r", "int64x2_t a", @@ -74379,13 +277522,13 @@ ], "instructions": [ [ - "SQSHRN2" + "SQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u16", + "name": "vqrshrn_high_n_u16", "arguments": [ "uint8x8_t r", "uint16x8_t a", @@ -74411,13 +277554,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u32", + "name": "vqrshrn_high_n_u32", "arguments": [ "uint16x4_t r", "uint32x4_t a", @@ -74443,13 +277586,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_high_n_u64", + "name": "vqrshrn_high_n_u64", "arguments": [ "uint32x2_t r", "uint64x2_t a", @@ -74475,13 +277618,13 @@ ], "instructions": [ [ - "UQSHRN2" + "UQRSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s16", + "name": "vqrshrn_n_s16", "arguments": [ "int16x8_t a", "const int n" @@ -74505,13 +277648,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s32", + "name": "vqrshrn_n_s32", "arguments": [ "int32x4_t a", "const int n" @@ -74535,13 +277678,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_s64", + "name": "vqrshrn_n_s64", "arguments": [ "int64x2_t a", "const int n" @@ -74565,13 +277708,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_u16", + "name": "vqrshrn_n_u16", "arguments": [ "uint16x8_t a", "const int n" @@ -74595,13 +277738,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrn_n_u32", + "name": "vqrshrn_n_u32", "arguments": [ "uint32x4_t a", "const int n" @@ -74625,13 +277768,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vqshrn_n_u64", + "name": "vqrshrn_n_u64", "arguments": [ "uint64x2_t a", "const int n" @@ -74655,13 +277798,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnd_n_s64", + "name": "vqrshrnd_n_s64", "arguments": [ "int64_t a", "const int n" @@ -74683,13 +277826,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnd_n_u64", + "name": "vqrshrnd_n_u64", "arguments": [ "uint64_t a", "const int n" @@ -74711,13 +277854,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnh_n_s16", + "name": "vqrshrnh_n_s16", "arguments": [ "int16_t a", "const int n" @@ -74739,13 +277882,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrnh_n_u16", + "name": "vqrshrnh_n_u16", "arguments": [ "uint16_t a", "const int n" @@ -74767,13 +277910,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrns_n_s32", + "name": "vqrshrns_n_s32", "arguments": [ "int32_t a", "const int n" @@ -74795,13 +277938,13 @@ ], "instructions": [ [ - "SQSHRN" + "SQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrns_n_u32", + "name": "vqrshrns_n_u32", "arguments": [ "uint32_t a", "const int n" @@ -74823,13 +277966,13 @@ ], "instructions": [ [ - "UQSHRN" + "UQRSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s16", + "name": "vqrshrun_high_n_s16", "arguments": [ "uint8x8_t r", "int16x8_t a", @@ -74855,13 +277998,13 @@ ], "instructions": [ [ - "SQSHRUN2" + "SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s32", + "name": "vqrshrun_high_n_s32", "arguments": [ "uint16x4_t r", "int32x4_t a", @@ -74887,13 +278030,13 @@ ], "instructions": [ [ - "SQSHRUN2" + "SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_high_n_s64", + "name": "vqrshrun_high_n_s64", "arguments": [ "uint32x2_t r", "int64x2_t a", @@ -74919,13 +278062,13 @@ ], "instructions": [ [ - "SQSHRUN2" + 
"SQRSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s16", + "name": "vqrshrun_n_s16", "arguments": [ "int16x8_t a", "const int n" @@ -74949,13 +278092,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s32", + "name": "vqrshrun_n_s32", "arguments": [ "int32x4_t a", "const int n" @@ -74979,13 +278122,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrun_n_s64", + "name": "vqrshrun_n_s64", "arguments": [ "int64x2_t a", "const int n" @@ -75009,13 +278152,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrund_n_s64", + "name": "vqrshrund_n_s64", "arguments": [ "int64_t a", "const int n" @@ -75037,13 +278180,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshrunh_n_s16", + "name": "vqrshrunh_n_s16", "arguments": [ "int16_t a", "const int n" @@ -75065,13 +278208,13 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqshruns_n_s32", + "name": "vqrshruns_n_s32", "arguments": [ "int32_t a", "const int n" @@ -75093,13 +278236,253 @@ ], "instructions": [ [ - "SQSHRUN" + "SQRSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s16", + "name": "vqshl_n_s16", + "arguments": [ + "int16x4_t a", + "const int n" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_s32", + "arguments": [ + "int32x2_t a", + "const int n" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + 
"SIMD_ISA": "Neon", + "name": "vqshl_n_s64", + "arguments": [ + "int64x1_t a", + "const int n" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_s8", + "arguments": [ + "int8x8_t a", + "const int n" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u16", + "arguments": [ + "uint16x4_t a", + "const int n" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u32", + "arguments": [ + "uint32x2_t a", + "const int n" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u64", + "arguments": [ + "uint64x1_t a", + "const int n" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_n_u8", + "arguments": [ + "uint8x8_t a", + "const int n" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": 
{ + "a": { + "register": "Vn.8B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshl_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -75122,13 +278505,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s32", + "name": "vqshl_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -75151,13 +278534,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s64", + "name": "vqshl_s64", "arguments": [ "int64x1_t a", "int64x1_t b" @@ -75180,13 +278563,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_s8", + "name": "vqshl_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -75209,16 +278592,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u16", + "name": "vqshl_u16", "arguments": [ "uint16x4_t a", - "uint16x4_t b" + "int16x4_t b" ], "return_type": { "value": "uint16x4_t" @@ -75238,16 +278621,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u32", + "name": "vqshl_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b" + "int32x2_t b" ], "return_type": { "value": "uint32x2_t" @@ -75267,16 +278650,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u64", + "name": "vqshl_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b" + "int64x1_t b" ], "return_type": { "value": "uint64x1_t" @@ -75296,16 +278679,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsub_u8", + "name": "vqshl_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b" + "int8x8_t b" ], "return_type": { "value": "uint8x8_t" @@ -75325,13 +278708,69 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubb_s8", + "name": "vqshlb_n_s8", + 
"arguments": [ + "int8_t a", + "const int n" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bn" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlb_n_u8", + "arguments": [ + "uint8_t a", + "const int n" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bn" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlb_s8", "arguments": [ "int8_t a", "int8_t b" @@ -75352,16 +278791,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubb_u8", + "name": "vqshlb_u8", "arguments": [ "uint8_t a", - "uint8_t b" + "int8_t b" ], "return_type": { "value": "uint8_t" @@ -75379,121 +278818,473 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubd_s64", + "name": "vqshld_n_s64", + "arguments": [ + "int64_t a", + "const int n" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_n_u64", + "arguments": [ + "uint64_t a", + "const int n" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_s64", "arguments": [ "int64_t a", "int64_t b" ], "return_type": { - "value": "int64_t" + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshld_u64", + "arguments": [ + "uint64_t a", + "int64_t b" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + }, + "b": { + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_n_s16", + "arguments": [ + "int16_t a", + "const int n" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_n_u16", + "arguments": [ + "uint16_t a", + "const int n" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "n": { + "minimum": 0, + "maximum": 15 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_s16", + "arguments": [ + "int16_t a", + "int16_t b" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlh_u16", + "arguments": [ + "uint16_t a", + "int16_t b" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hn" + }, + "b": { + "register": "Hm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s16", + "arguments": [ + "int16x8_t a", + "const int n" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + 
"maximum": 15 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s32", + "arguments": [ + "int32x4_t a", + "const int n" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s64", + "arguments": [ + "int64x2_t a", + "const int n" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_s8", + "arguments": [ + "int8x16_t a", + "const int n" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshlq_n_u16", + "arguments": [ + "uint16x8_t a", + "const int n" + ], + "return_type": { + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8H" }, - "b": { - "register": "Dm" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubd_u64", + "name": "vqshlq_n_u32", "arguments": [ - "uint64_t a", - "uint64_t b" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4S" }, - "b": { - "register": "Dm" + "n": { + "minimum": 0, + "maximum": 31 } }, 
"Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubh_s16", + "name": "vqshlq_n_u64", "arguments": [ - "int16_t a", - "int16_t b" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "int16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.2D" }, - "b": { - "register": "Hm" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubh_u16", + "name": "vqshlq_n_u8", "arguments": [ - "uint16_t a", - "uint16_t b" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.16B" }, - "b": { - "register": "Hm" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s16", + "name": "vqshlq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -75516,13 +279307,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s32", + "name": "vqshlq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -75545,13 +279336,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s64", + "name": "vqshlq_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -75574,13 +279365,13 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_s8", + "name": "vqshlq_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -75603,16 +279394,16 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u16", + "name": "vqshlq_u16", "arguments": [ "uint16x8_t a", - "uint16x8_t b" + "int16x8_t b" ], "return_type": { "value": "uint16x8_t" @@ -75632,16 +279423,16 @@ ], 
"instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u32", + "name": "vqshlq_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b" + "int32x4_t b" ], "return_type": { "value": "uint32x4_t" @@ -75661,16 +279452,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u64", + "name": "vqshlq_u64", "arguments": [ "uint64x2_t a", - "uint64x2_t b" + "int64x2_t b" ], "return_type": { "value": "uint64x2_t" @@ -75690,16 +279481,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubq_u8", + "name": "vqshlq_u8", "arguments": [ "uint8x16_t a", - "uint8x16_t b" + "int8x16_t b" ], "return_type": { "value": "uint8x16_t" @@ -75719,16 +279510,16 @@ ], "instructions": [ [ - "UQSUB" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubs_s32", + "name": "vqshls_n_s32", "arguments": [ "int32_t a", - "int32_t b" + "const int n" ], "return_type": { "value": "int32_t" @@ -75737,8 +279528,9 @@ "a": { "register": "Sn" }, - "b": { - "register": "Sm" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -75746,20 +279538,48 @@ ], "instructions": [ [ - "SQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqsubs_u32", + "name": "vqshls_n_u32", "arguments": [ "uint32_t a", - "uint32_t b" + "const int n" ], "return_type": { "value": "uint32_t" }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + }, + "n": { + "minimum": 0, + "maximum": 31 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqshls_s32", + "arguments": [ + "int32_t a", + "int32_t b" + ], + "return_type": { + "value": "int32_t" + }, "Arguments_Preparation": { "a": { "register": "Sn" @@ -75773,26 +279593,26 @@ ], "instructions": [ [ - "UQSUB" + "SQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_p8", + "name": "vqshls_u32", "arguments": [ - "poly8x16_t t", - "uint8x8_t idx" + "uint32_t a", + "int32_t b" ], 
"return_type": { - "value": "poly8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Sm" } }, "Architectures": [ @@ -75800,134 +279620,147 @@ ], "instructions": [ [ - "TBL" + "UQSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_s8", + "name": "vqshlu_n_s16", "arguments": [ - "int8x16_t t", - "uint8x8_t idx" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1_u8", + "name": "vqshlu_n_s32", "arguments": [ - "uint8x16_t t", - "uint8x8_t idx" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.2S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_p8", + "name": "vqshlu_n_s64", "arguments": [ - "poly8x16_t t", - "uint8x16_t idx" + "int64x1_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_s8", + "name": "vqshlu_n_s8", "arguments": [ - "int8x16_t t", - "uint8x16_t idx" + "int8x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" 
}, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8B" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl1q_u8", + "name": "vqshlub_n_s8", "arguments": [ - "uint8x16_t t", - "uint8x16_t idx" + "int8_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Bn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -75935,26 +279768,27 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_p8", + "name": "vqshlud_n_s64", "arguments": [ - "poly8x16x2_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -75962,26 +279796,27 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_s8", + "name": "vqshluh_n_s16", "arguments": [ - "int8x16x2_t t", - "uint8x8_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -75989,134 +279824,147 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2_u8", + "name": "vqshluq_n_s16", "arguments": [ - "uint8x16x2_t t", - "uint8x8_t idx" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "idx": { - 
"register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_p8", + "name": "vqshluq_n_s32", "arguments": [ - "poly8x16x2_t t", - "uint8x16_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_s8", + "name": "vqshluq_n_s64", "arguments": [ - "int8x16x2_t t", - "uint8x16_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl2q_u8", + "name": "vqshluq_n_s8", "arguments": [ - "uint8x16x2_t t", - "uint8x16_t idx" + "int8x16_t a", + "const int n" ], "return_type": { "value": "uint8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" - }, - "t": { + "a": { "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_p8", + "name": "vqshlus_n_s32", "arguments": [ - "poly8x16x3_t t", - "uint8x8_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Sn" }, - "t": { - "register": 
"Vn.16B" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -76124,26 +279972,31 @@ ], "instructions": [ [ - "TBL" + "SQSHLU" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_s8", + "name": "vqshrn_high_n_s16", "arguments": [ - "int8x16x3_t t", - "uint8x8_t idx" + "int8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76151,26 +280004,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3_u8", + "name": "vqshrn_high_n_s32", "arguments": [ - "uint8x16x3_t t", - "uint8x8_t idx" + "int16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76178,26 +280036,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_p8", + "name": "vqshrn_high_n_s64", "arguments": [ - "poly8x16x3_t t", - "uint8x16_t idx" + "int32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76205,26 +280068,31 @@ ], "instructions": [ [ - "TBL" + "SQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_s8", + "name": "vqshrn_high_n_u16", "arguments": [ - "int8x16x3_t t", - "uint8x16_t idx" + "uint8x8_t r", + "uint16x8_t a", + "const int n" ], 
"return_type": { - "value": "int8x16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76232,26 +280100,31 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl3q_u8", + "name": "vqshrn_high_n_u32", "arguments": [ - "uint8x16x3_t t", - "uint8x16_t idx" + "uint16x4_t r", + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76259,26 +280132,31 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4_p8", + "name": "vqshrn_high_n_u64", "arguments": [ - "poly8x16x4_t t", - "uint8x8_t idx" + "uint32x2_t r", + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76286,196 +280164,207 @@ ], "instructions": [ [ - "TBL" + "UQSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4_s8", + "name": "vqshrn_n_s16", "arguments": [ - "int8x16x4_t t", - "uint8x8_t idx" + "int16x8_t a", + "const int n" ], "return_type": { "value": "int8x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4_u8", + 
"name": "vqshrn_n_s32", "arguments": [ - "uint8x16x4_t t", - "uint8x8_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.8B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_p8", + "name": "vqshrn_n_s64", "arguments": [ - "poly8x16x4_t t", - "uint8x16_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_s8", + "name": "vqshrn_n_u16", "arguments": [ - "int8x16x4_t t", - "uint8x16_t idx" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbl4q_u8", + "name": "vqshrn_n_u32", "arguments": [ - "uint8x16x4_t t", - "uint8x16_t idx" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "idx": { - "register": "Vm.16B" + "a": { + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBL" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_p8", + "name": "vqshrn_n_u64", 
"arguments": [ - "poly8x8_t a", - "poly8x16_t t", - "uint8x8_t idx" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_s8", + "name": "vqshrnd_n_s64", "arguments": [ - "int8x8_t a", - "int8x16_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76483,30 +280372,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1_u8", + "name": "vqshrnd_n_u64", "arguments": [ - "uint8x8_t a", - "uint8x16_t t", - "uint8x8_t idx" + "uint64_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76514,30 +280400,27 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1q_p8", + "name": "vqshrnh_n_s16", "arguments": [ - "poly8x16_t a", - "poly8x16_t t", - "uint8x16_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -76545,30 +280428,27 @@ ], 
"instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1q_s8", + "name": "vqshrnh_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t t", - "uint8x16_t idx" + "uint16_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -76576,30 +280456,27 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx1q_u8", + "name": "vqshrns_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t t", - "uint8x16_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76607,30 +280484,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_p8", + "name": "vqshrns_n_u32", "arguments": [ - "poly8x8_t a", - "poly8x16x2_t t", - "uint8x8_t idx" + "uint32_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76638,30 +280512,31 @@ ], "instructions": [ [ - "TBX" + "UQSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_s8", + "name": "vqshrun_high_n_s16", "arguments": [ - "int8x8_t a", - "int8x16x2_t t", - "uint8x8_t idx" + "uint8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8H" }, - "idx": { - 
"register": "Vm.8B" + "n": { + "minimum": 1, + "maximum": 8 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -76669,30 +280544,31 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2_u8", + "name": "vqshrun_high_n_s32", "arguments": [ - "uint8x8_t a", - "uint8x16x2_t t", - "uint8x8_t idx" + "uint16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4S" }, - "idx": { - "register": "Vm.8B" + "n": { + "minimum": 1, + "maximum": 16 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -76700,30 +280576,31 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_p8", + "name": "vqshrun_high_n_s64", "arguments": [ - "poly8x16_t a", - "poly8x16x2_t t", - "uint8x16_t idx" + "uint32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2D" }, - "idx": { - "register": "Vm.16B" + "n": { + "minimum": 1, + "maximum": 32 }, - "t": { - "register": "Vn.16B" + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -76731,123 +280608,117 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_s8", + "name": "vqshrun_n_s16", "arguments": [ - "int8x16_t a", - "int8x16x2_t t", - "uint8x16_t idx" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.8H" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx2q_u8", 
+ "name": "vqshrun_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16x2_t t", - "uint8x16_t idx" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.4S" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_p8", + "name": "vqshrun_n_s64", "arguments": [ - "poly8x8_t a", - "poly8x16x3_t t", - "uint8x8_t idx" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.2D" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_s8", + "name": "vqshrund_n_s64", "arguments": [ - "int8x8_t a", - "int8x16x3_t t", - "uint8x8_t idx" + "int64_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -76855,30 +280726,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3_u8", + "name": "vqshrunh_n_s16", "arguments": [ - "uint8x8_t a", - "uint8x16x3_t t", - "uint8x8_t idx" + "int16_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Hn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 8 } }, 
"Architectures": [ @@ -76886,30 +280754,27 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_p8", + "name": "vqshruns_n_s32", "arguments": [ - "poly8x16_t a", - "poly8x16x3_t t", - "uint8x16_t idx" + "int32_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Sn" }, - "t": { - "register": "Vn.16B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -76917,278 +280782,258 @@ ], "instructions": [ [ - "TBX" + "SQSHRUN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_s8", + "name": "vqsub_s16", "arguments": [ - "int8x16_t a", - "int8x16x3_t t", - "uint8x16_t idx" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx3q_u8", + "name": "vqsub_s32", "arguments": [ - "uint8x16_t a", - "uint8x16x3_t t", - "uint8x16_t idx" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.2S" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_p8", + "name": "vqsub_s64", "arguments": [ - "poly8x8_t a", - "poly8x16x4_t t", - "uint8x8_t idx" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - 
"register": "Vm.8B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_s8", + "name": "vqsub_s8", "arguments": [ "int8x8_t a", - "int8x16x4_t t", - "uint8x8_t idx" + "int8x8_t b" ], "return_type": { "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8B" }, - "idx": { + "b": { "register": "Vm.8B" - }, - "t": { - "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4_u8", + "name": "vqsub_u16", "arguments": [ - "uint8x8_t a", - "uint8x16x4_t t", - "uint8x8_t idx" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "idx": { - "register": "Vm.8B" + "register": "Vn.4H" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_p8", + "name": "vqsub_u32", "arguments": [ - "poly8x16_t a", - "poly8x16x4_t t", - "uint8x16_t idx" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.2S" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_s8", + "name": "vqsub_u64", "arguments": [ - "int8x16_t a", - "int8x16x4_t t", - "uint8x16_t idx" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - 
"idx": { - "register": "Vm.16B" + "register": "Dn" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vqtbx4q_u8", + "name": "vqsub_u8", "arguments": [ - "uint8x16_t a", - "uint8x16x4_t t", - "uint8x16_t idx" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "idx": { - "register": "Vm.16B" + "register": "Vn.8B" }, - "t": { - "register": "Vn.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TBX" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s16", + "name": "vqsubb_s8", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "int8_t a", + "int8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Bn" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Bm" } }, "Architectures": [ @@ -77196,30 +281041,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s32", + "name": "vqsubb_u8", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "uint8_t a", + "uint8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Bn" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Bm" } }, "Architectures": [ @@ -77227,30 +281068,26 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_s64", + "name": "vqsubd_s64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vn.2D" + "register": "Dn" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Dm" } }, "Architectures": [ @@ -77258,30 +281095,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u16", + "name": "vqsubd_u64", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Dn" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Dm" } }, "Architectures": [ @@ -77289,30 +281122,26 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u32", + "name": "vqsubh_s16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "int16_t a", + "int16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Hn" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Hm" } }, "Architectures": [ @@ -77320,30 +281149,26 @@ ], "instructions": [ [ - "RADDHN2" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_high_u64", + "name": "vqsubh_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint16_t a", + "uint16_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Hn" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Hm" } }, "Architectures": [ @@ -77351,19 +281176,19 @@ ], "instructions": [ [ - "RADDHN2" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s16", + "name": "vqsubq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -77380,19 +281205,19 @@ 
], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s32", + "name": "vqsubq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -77409,19 +281234,19 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_s64", + "name": "vqsubq_s64", "arguments": [ "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -77438,19 +281263,48 @@ ], "instructions": [ [ - "RADDHN" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u16", + "name": "vqsubq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vqsubq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -77467,19 +281321,19 @@ ], "instructions": [ [ - "RADDHN" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u32", + "name": "vqsubq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -77496,19 +281350,19 @@ ], "instructions": [ [ - "RADDHN" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vraddhn_u64", + "name": "vqsubq_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { @@ -77525,47 +281379,55 @@ ], "instructions": [ [ - "RADDHN" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrax1q_u64", + "name": "vqsubq_u8", "arguments": [ - "uint64x2_t a", 
- "uint64x2_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, - "b": {} + "b": { + "register": "Vm.16B" + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RAX1" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_p8", + "name": "vqsubs_s32", "arguments": [ - "poly8x8_t a" + "int32_t a", + "int32_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "int32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ @@ -77573,22 +281435,26 @@ ], "instructions": [ [ - "RBIT" + "SQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_s8", + "name": "vqsubs_u32", "arguments": [ - "int8x8_t a" + "uint32_t a", + "uint32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ @@ -77596,22 +281462,26 @@ ], "instructions": [ [ - "RBIT" + "UQSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbit_u8", + "name": "vqtbl1_p8", "arguments": [ - "uint8x8_t a" + "poly8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -77619,21 +281489,25 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbitq_p8", + "name": "vqtbl1_s8", "arguments": [ - "poly8x16_t a" + "int8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "poly8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.8B" + }, + "t": { "register": "Vn.16B" } }, @@ -77642,21 +281516,25 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vrbitq_s8", + "name": "vqtbl1_u8", "arguments": [ - "int8x16_t a" + "uint8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.8B" + }, + "t": { "register": "Vn.16B" } }, @@ -77665,21 +281543,25 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrbitq_u8", + "name": "vqtbl1q_p8", "arguments": [ - "uint8x16_t a" + "poly8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { + "idx": { + "register": "Vm.16B" + }, + "t": { "register": "Vn.16B" } }, @@ -77688,71 +281570,83 @@ ], "instructions": [ [ - "RBIT" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f16", + "name": "vqtbl1q_s8", "arguments": [ - "float16x4_t a" + "int8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f32", + "name": "vqtbl1q_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_f64", + "name": "vqtbl2_p8", "arguments": [ - "float64x1_t a" + "poly8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64x1_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": 
"Vn+1.16B" } }, "Architectures": [ @@ -77760,47 +281654,59 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpe_u32", + "name": "vqtbl2_s8", "arguments": [ - "uint32x2_t a" + "int8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecped_f64", + "name": "vqtbl2_u8", "arguments": [ - "float64_t a" + "uint8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -77808,22 +281714,29 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeh_f16", + "name": "vqtbl2q_p8", "arguments": [ - "float16_t a" + "poly8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -77831,71 +281744,92 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_f16", + "name": "vqtbl2q_s8", "arguments": [ - "float16x8_t a" + "int8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "A32", 
"A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_f32", + "name": "vqtbl2q_u8", "arguments": [ - "float32x4_t a" + "uint8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_f64", + "name": "vqtbl3_p8", "arguments": [ - "float64x2_t a" + "poly8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -77903,47 +281837,65 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpeq_u32", + "name": "vqtbl3_s8", "arguments": [ - "uint32x4_t a" + "int8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpes_f32", + "name": "vqtbl3_u8", "arguments": [ - "float32_t a" + "uint8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": 
"Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -77951,83 +281903,98 @@ ], "instructions": [ [ - "FRECPE" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f16", + "name": "vqtbl3q_p8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.4H" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f32", + "name": "vqtbl3q_s8", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.2S" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecps_f64", + "name": "vqtbl3q_u8", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "uint8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Dm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -78035,26 +282002,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsd_f64", + "name": "vqtbl4_p8", "arguments": [ - 
"float64_t a", - "float64_t b" + "poly8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Dm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78062,26 +282038,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsh_f16", + "name": "vqtbl4_s8", "arguments": [ - "float16_t a", - "float16_t b" + "int8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Hm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78089,83 +282074,107 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f16", + "name": "vqtbl4_u8", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "uint8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "idx": { + "register": "Vm.8B" }, - "b": { - "register": "Vm.8H" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f32", + "name": "vqtbl4q_p8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "poly8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x4_t" + "value": 
"poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.4S" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpsq_f64", + "name": "vqtbl4q_s8", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Vm.2D" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78173,26 +282182,35 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpss_f32", + "name": "vqtbl4q_u8", "arguments": [ - "float32_t a", - "float32_t b" + "uint8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "idx": { + "register": "Vm.16B" }, - "b": { - "register": "Sm" + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -78200,22 +282218,30 @@ ], "instructions": [ [ - "FRECPS" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpxd_f64", + "name": "vqtbx1_p8", "arguments": [ - "float64_t a" + "poly8x8_t a", + "poly8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float64_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8B" + }, + "idx": { + 
"register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78223,22 +282249,30 @@ ], "instructions": [ [ - "FRECPX" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpxh_f16", + "name": "vqtbx1_s8", "arguments": [ - "float16_t a" + "int8x8_t a", + "int8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78246,22 +282280,30 @@ ], "instructions": [ [ - "FRECPX" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrecpxs_f32", + "name": "vqtbx1_u8", "arguments": [ - "float32_t a" + "uint8x8_t a", + "uint8x16_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ @@ -78269,47 +282311,61 @@ ], "instructions": [ [ - "FRECPX" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_f32", + "name": "vqtbx1q_p8", "arguments": [ - "float32x2_t a" + "poly8x16_t a", + "poly8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_f64", + "name": "vqtbx1q_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a", + "int8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, 
"Architectures": [ @@ -78317,321 +282373,456 @@ ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p16", + "name": "vqtbx1q_u8", "arguments": [ - "poly16x4_t a" + "uint8x16_t a", + "uint8x16_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t": { + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p64", + "name": "vqtbx2_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a", + "poly8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_p8", + "name": "vqtbx2_s8", "arguments": [ - "poly8x8_t a" + "int8x8_t a", + "int8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s16", + "name": "vqtbx2_u8", "arguments": [ - "int16x4_t a" + "uint8x8_t a", + "uint8x16x2_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + 
"register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s32", + "name": "vqtbx2q_p8", "arguments": [ - "int32x2_t a" + "poly8x16_t a", + "poly8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s64", + "name": "vqtbx2q_s8", "arguments": [ - "int64x1_t a" + "int8x16_t a", + "int8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_s8", + "name": "vqtbx2q_u8", "arguments": [ - "int8x8_t a" + "uint8x16_t a", + "uint8x16x2_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u16", + "name": "vqtbx3_p8", "arguments": [ - "uint16x4_t a" + "poly8x8_t a", + "poly8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": 
"poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u32", + "name": "vqtbx3_s8", "arguments": [ - "uint32x2_t a" + "int8x8_t a", + "int8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u64", + "name": "vqtbx3_u8", "arguments": [ - "uint64x1_t a" + "uint8x8_t a", + "uint8x16x3_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f16_u8", + "name": "vqtbx3q_p8", "arguments": [ - "uint8x8_t a" + "poly8x16_t a", + "poly8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float16x4_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + 
"register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_f16", + "name": "vqtbx3q_s8", "arguments": [ - "float16x4_t a" + "int8x16_t a", + "int8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_f64", + "name": "vqtbx3q_u8", "arguments": [ - "float64x1_t a" + "uint8x16_t a", + "uint8x16x3_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -78639,271 +282830,393 @@ ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_p16", + "name": "vqtbx4_p8", "arguments": [ - "poly16x4_t a" + "poly8x8_t a", + "poly8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_p8", + "name": 
"vqtbx4_s8", "arguments": [ - "poly8x8_t a" + "int8x8_t a", + "int8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s16", + "name": "vqtbx4_u8", "arguments": [ - "int16x4_t a" + "uint8x8_t a", + "uint8x16x4_t t", + "uint8x8_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s32", + "name": "vqtbx4q_p8", "arguments": [ - "int32x2_t a" + "poly8x16_t a", + "poly8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s64", + "name": "vqtbx4q_s8", "arguments": [ - "int64x1_t a" + "int8x16_t a", + "int8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": 
"float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_s8", + "name": "vqtbx4q_u8", "arguments": [ - "int8x8_t a" + "uint8x16_t a", + "uint8x16x4_t t", + "uint8x16_t idx" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.16B" + }, + "idx": { + "register": "Vm.16B" + }, + "t.val[0]": { + "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u16", + "name": "vraddhn_high_s16", "arguments": [ - "uint16x4_t a" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u32", + "name": "vraddhn_high_s32", "arguments": [ - "uint32x2_t a" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u64", + "name": "vraddhn_high_s64", "arguments": [ - "uint64x1_t a" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f32_u8", + "name": "vraddhn_high_u16", "arguments": [ - "uint8x8_t a" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, + "r": { "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_f16", + "name": "vraddhn_high_u32", "arguments": [ - "float16x4_t a" + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, + "r": { "register": "Vd.4H" } }, @@ -78912,21 +283225,29 @@ ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_f32", + "name": "vraddhn_high_u64", "arguments": [ - "float32x2_t a" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "r": { "register": "Vd.2S" } }, @@ -78935,160 +283256,200 @@ ], "instructions": [ [ - "NOP" + "RADDHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p16", + "name": "vraddhn_s16", "arguments": [ - 
"poly16x4_t a" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p64", + "name": "vraddhn_s32", "arguments": [ - "poly64x1_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_p8", + "name": "vraddhn_s64", "arguments": [ - "poly8x8_t a" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s16", + "name": "vraddhn_u16", "arguments": [ - "int16x4_t a" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s32", + "name": "vraddhn_u32", "arguments": [ - "int32x2_t a" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], 
"instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s64", + "name": "vraddhn_u64", "arguments": [ - "int64x1_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "NOP" + "RADDHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_s8", + "name": "vrax1q_u64", "arguments": [ - "int8x8_t a" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -79096,22 +283457,22 @@ ], "instructions": [ [ - "NOP" + "RAX1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u16", + "name": "vrbit_p8", "arguments": [ - "uint16x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8B" } }, "Architectures": [ @@ -79119,22 +283480,22 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u32", + "name": "vrbit_s8", "arguments": [ - "uint32x2_t a" + "int8x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.8B" } }, "Architectures": [ @@ -79142,22 +283503,22 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u64", + "name": "vrbit_u8", "arguments": [ - "uint64x1_t a" + "uint8x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.8B" } }, "Architectures": [ @@ -79165,22 +283526,22 @@ ], "instructions": [ [ - 
"NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_f64_u8", + "name": "vrbitq_p8", "arguments": [ - "uint8x8_t a" + "poly8x16_t a" ], "return_type": { - "value": "float64x1_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.16B" } }, "Architectures": [ @@ -79188,144 +283549,140 @@ ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f16", + "name": "vrbitq_s8", "arguments": [ - "float16x4_t a" + "int8x16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f32", + "name": "vrbitq_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "RBIT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_f64", + "name": "vrecpe_f16", "arguments": [ - "float64x1_t a" + "float16x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_p64", + "name": "vrecpe_f32", "arguments": [ - "poly64x1_t a" + "float32x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_p8", + "name": "vrecpe_f64", "arguments": [ - "poly8x8_t a" + 
"float64x1_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s16", + "name": "vrecpe_u32", "arguments": [ - "int16x4_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.2S" } }, "Architectures": [ @@ -79335,97 +283692,92 @@ ], "instructions": [ [ - "NOP" + "URECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s32", + "name": "vrecped_f64", "arguments": [ - "int32x2_t a" + "float64_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s64", + "name": "vrecpeh_f16", "arguments": [ - "int64x1_t a" + "float16_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_s8", + "name": "vrecpeq_f16", "arguments": [ - "int8x8_t a" + "float16x8_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u16", + "name": "vrecpeq_f32", "arguments": [ - "uint16x4_t a" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vd.4H" + "register": "Vn.4S" } }, "Architectures": [ @@ -79435,47 +283787,45 @@ ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u32", + "name": "vrecpeq_f64", "arguments": [ - "uint32x2_t a" + "float64x2_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u64", + "name": "vrecpeq_u32", "arguments": [ - "uint64x1_t a" + "uint32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vn.4S" } }, "Architectures": [ @@ -79485,47 +283835,49 @@ ], "instructions": [ [ - "NOP" + "URECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p16_u8", + "name": "vrecpes_f32", "arguments": [ - "uint8x8_t a" + "float32_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Sn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f16", + "name": "vrecps_f16", "arguments": [ - "float16x4_t a" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -79534,46 +283886,55 @@ ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f32", + "name": "vrecps_f32", "arguments": [ - "float32x2_t a" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.2S" + }, + 
"b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_f64", + "name": "vrecps_f64", "arguments": [ - "float64x1_t a" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ @@ -79581,70 +283942,80 @@ ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_p16", + "name": "vrecpsd_f64", "arguments": [ - "poly16x4_t a" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Dn" + }, + "b": { + "register": "Dm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_p8", + "name": "vrecpsh_f16", "arguments": [ - "poly8x8_t a" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Hn" + }, + "b": { + "register": "Hm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s16", + "name": "vrecpsq_f16", "arguments": [ - "int16x4_t a" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -79653,166 +284024,174 @@ ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s32", + "name": "vrecpsq_f32", "arguments": [ - "int32x2_t a" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": 
"poly64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_s8", + "name": "vrecpsq_f64", "arguments": [ - "int8x8_t a" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u16", + "name": "vrecpss_f32", "arguments": [ - "uint16x4_t a" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "poly64x1_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Sn" + }, + "b": { + "register": "Sm" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u32", + "name": "vrecpxd_f64", "arguments": [ - "uint32x2_t a" + "float64_t a" ], "return_type": { - "value": "poly64x1_t" + "value": "float64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u64", + "name": "vrecpxh_f16", "arguments": [ - "uint64x1_t a" + "float16_t a" ], "return_type": { - "value": "poly64x1_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Hn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p64_u8", + "name": "vrecpxs_f32", "arguments": [ - "uint8x8_t a" + "float32_t a" ], "return_type": { - "value": "poly64x1_t" + "value": "float32_t" 
}, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Sn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "NOP" + "FRECPX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f16", + "name": "vreinterpret_f16_f32", "arguments": [ - "float16x4_t a" + "float32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -79828,21 +284207,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f32", + "name": "vreinterpret_f16_f64", "arguments": [ - "float32x2_t a" + "float64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -79853,19 +284230,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_f64", + "name": "vreinterpret_f16_p16", "arguments": [ - "float64x1_t a" + "poly16x4_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -79876,20 +284255,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_p16", + "name": "vreinterpret_f16_p64", "arguments": [ - "poly16x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -79901,19 +284279,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_p64", + "name": "vreinterpret_f16_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -79925,12 
+284304,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s16", + "name": "vreinterpret_f16_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79950,12 +284329,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s32", + "name": "vreinterpret_f16_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -79975,12 +284354,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s64", + "name": "vreinterpret_f16_s64", "arguments": [ "int64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -80000,12 +284379,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_s8", + "name": "vreinterpret_f16_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -80025,12 +284404,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u16", + "name": "vreinterpret_f16_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -80050,12 +284429,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u32", + "name": "vreinterpret_f16_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -80075,12 +284454,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u64", + "name": "vreinterpret_f16_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { @@ -80100,12 +284479,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_p8_u8", + "name": "vreinterpret_f16_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": 
{ "a": { @@ -80125,12 +284504,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f16", + "name": "vreinterpret_f32_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80150,37 +284529,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f32", - "arguments": [ - "float32x2_t a" - ], - "return_type": { - "value": "int16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_f64", + "name": "vreinterpret_f32_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80198,12 +284552,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p16", + "name": "vreinterpret_f32_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80223,19 +284577,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p64", + "name": "vreinterpret_f32_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -80247,16 +284602,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_p8", + "name": "vreinterpret_f32_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -80272,12 +284627,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s32", + "name": "vreinterpret_f32_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": 
"float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80297,12 +284652,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s64", + "name": "vreinterpret_f32_s64", "arguments": [ "int64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80322,12 +284677,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_s8", + "name": "vreinterpret_f32_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80347,12 +284702,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u16", + "name": "vreinterpret_f32_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80372,12 +284727,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u32", + "name": "vreinterpret_f32_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80397,12 +284752,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u64", + "name": "vreinterpret_f32_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80422,12 +284777,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s16_u8", + "name": "vreinterpret_f32_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -80447,12 +284802,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_f16", + "name": "vreinterpret_f64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80460,8 +284815,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80472,12 +284825,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_s32_f32", + "name": "vreinterpret_f64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80485,8 +284838,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80497,16 +284848,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_f64", + "name": "vreinterpret_f64_p16", "arguments": [ - "float64x1_t a" + "poly16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.4H" } }, "Architectures": [ @@ -80520,21 +284871,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_p16", + "name": "vreinterpret_f64_p64", "arguments": [ - "poly16x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80545,20 +284894,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_p64", + "name": "vreinterpret_f64_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -80569,21 +284917,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_p8", + "name": "vreinterpret_f64_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80594,21 +284940,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s16", + "name": "vreinterpret_f64_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": 
"float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80619,12 +284963,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s64", + "name": "vreinterpret_f64_s64", "arguments": [ "int64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80632,8 +284976,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80644,12 +284986,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_s8", + "name": "vreinterpret_f64_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80657,8 +284999,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80669,12 +285009,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u16", + "name": "vreinterpret_f64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80682,8 +285022,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80694,12 +285032,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u32", + "name": "vreinterpret_f64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80707,8 +285045,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80719,12 +285055,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u64", + "name": "vreinterpret_f64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80732,8 +285068,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80744,12 +285078,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s32_u8", + "name": 
"vreinterpret_f64_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -80757,8 +285091,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -80769,12 +285101,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f16", + "name": "vreinterpret_p16_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80794,12 +285126,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f32", + "name": "vreinterpret_p16_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80819,12 +285151,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_f64", + "name": "vreinterpret_p16_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80842,20 +285174,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_p16", + "name": "vreinterpret_p16_p64", "arguments": [ - "poly16x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -80867,19 +285198,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_p64", + "name": "vreinterpret_p16_p8", "arguments": [ - "poly64x1_t a" + "poly8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -80891,16 +285223,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_p8", + "name": "vreinterpret_p16_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -80916,16 +285248,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s16", + "name": "vreinterpret_p16_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -80941,16 +285273,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s32", + "name": "vreinterpret_p16_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -80966,12 +285298,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_s8", + "name": "vreinterpret_p16_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -80991,12 +285323,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u16", + "name": "vreinterpret_p16_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81016,12 +285348,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u32", + "name": "vreinterpret_p16_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81041,12 +285373,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u64", + "name": "vreinterpret_p16_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int64x1_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81066,12 +285398,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s64_u8", + "name": "vreinterpret_p16_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int64x1_t" + "value": 
"poly16x4_t" }, "Arguments_Preparation": { "a": { @@ -81091,12 +285423,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f16", + "name": "vreinterpret_p64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81104,7 +285436,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81116,12 +285447,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f32", + "name": "vreinterpret_p64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81129,7 +285460,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81141,12 +285471,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_f64", + "name": "vreinterpret_p64_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81164,43 +285494,18 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p16", + "name": "vreinterpret_p64_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { "register": "Vd.4H" } }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p64", - "arguments": [ - "poly64x1_t a" - ], - "return_type": { - "value": "int8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1D" - } - }, "Architectures": [ "A32", "A64" @@ -81213,12 +285518,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_p8", + "name": "vreinterpret_p64_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81226,7 +285531,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81238,12 +285542,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s16", + "name": 
"vreinterpret_p64_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81251,7 +285555,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81263,12 +285566,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s32", + "name": "vreinterpret_p64_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81276,7 +285579,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81288,20 +285590,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_s64", + "name": "vreinterpret_p64_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81313,12 +285614,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u16", + "name": "vreinterpret_p64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81326,7 +285627,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81338,12 +285638,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u32", + "name": "vreinterpret_p64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81351,7 +285651,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81363,12 +285662,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u64", + "name": "vreinterpret_p64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81376,7 +285675,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81388,12 +285686,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_s8_u8", + "name": 
"vreinterpret_p64_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { @@ -81401,7 +285699,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -81413,12 +285710,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f16", + "name": "vreinterpret_p8_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81438,12 +285735,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f32", + "name": "vreinterpret_p8_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81463,12 +285760,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_f64", + "name": "vreinterpret_p8_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81486,12 +285783,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p16", + "name": "vreinterpret_p8_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81511,12 +285808,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p64", + "name": "vreinterpret_p8_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81535,16 +285832,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_p8", + "name": "vreinterpret_p8_s16", "arguments": [ - "poly8x8_t a" + "int16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81560,16 +285857,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s16", + "name": "vreinterpret_p8_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" 
], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -81585,16 +285882,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s32", + "name": "vreinterpret_p8_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -81610,16 +285907,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s64", + "name": "vreinterpret_p8_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -81635,16 +285932,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_s8", + "name": "vreinterpret_p8_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81660,12 +285957,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u32", + "name": "vreinterpret_p8_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81685,12 +285982,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u64", + "name": "vreinterpret_p8_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81710,12 +286007,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u16_u8", + "name": "vreinterpret_p8_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { @@ -81735,12 +286032,12 @@ }, { "SIMD_ISA": "Neon", - 
"name": "vreinterpret_u32_f16", + "name": "vreinterpret_s16_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81760,12 +286057,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_f32", + "name": "vreinterpret_s16_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81785,12 +286082,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_f64", + "name": "vreinterpret_s16_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81808,12 +286105,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p16", + "name": "vreinterpret_s16_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81833,12 +286130,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p64", + "name": "vreinterpret_s16_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81857,12 +286154,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_p8", + "name": "vreinterpret_s16_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -81882,16 +286179,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s16", + "name": "vreinterpret_s16_s32", "arguments": [ - "int16x4_t a" + "int32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -81907,16 +286204,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s32", + "name": "vreinterpret_s16_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - 
"value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -81932,16 +286229,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s64", + "name": "vreinterpret_s16_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -81957,16 +286254,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_s8", + "name": "vreinterpret_s16_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -81982,16 +286279,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u16", + "name": "vreinterpret_s16_u32", "arguments": [ - "uint16x4_t a" + "uint32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82007,12 +286304,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u64", + "name": "vreinterpret_s16_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -82032,12 +286329,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u32_u8", + "name": "vreinterpret_s16_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -82057,12 +286354,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_f16", + "name": "vreinterpret_s32_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82082,12 +286379,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_u64_f32", + "name": "vreinterpret_s32_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82107,12 +286404,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_f64", + "name": "vreinterpret_s32_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82130,12 +286427,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p16", + "name": "vreinterpret_s32_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82155,12 +286452,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p64", + "name": "vreinterpret_s32_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82179,12 +286476,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_p8", + "name": "vreinterpret_s32_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82204,12 +286501,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s16", + "name": "vreinterpret_s32_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82229,16 +286526,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s32", + "name": "vreinterpret_s32_s64", "arguments": [ - "int32x2_t a" + "int64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -82254,16 +286551,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s64", + "name": "vreinterpret_s32_s8", "arguments": [ - "int64x1_t a" + "int8x8_t a" ], "return_type": { - "value": 
"uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1D" + "register": "Vd.8B" } }, "Architectures": [ @@ -82279,16 +286576,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_s8", + "name": "vreinterpret_s32_u16", "arguments": [ - "int8x8_t a" + "uint16x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vd.4H" } }, "Architectures": [ @@ -82304,16 +286601,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u16", + "name": "vreinterpret_s32_u32", "arguments": [ - "uint16x4_t a" + "uint32x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82329,16 +286626,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u32", + "name": "vreinterpret_s32_u64", "arguments": [ - "uint32x2_t a" + "uint64x1_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vd.1D" } }, "Architectures": [ @@ -82354,12 +286651,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u64_u8", + "name": "vreinterpret_s32_u8", "arguments": [ "uint8x8_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -82379,12 +286676,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_f16", + "name": "vreinterpret_s64_f16", "arguments": [ "float16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82404,12 +286701,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_f32", + "name": "vreinterpret_s64_f32", "arguments": [ "float32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82429,12 +286726,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpret_u8_f64", + "name": "vreinterpret_s64_f64", "arguments": [ "float64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82452,12 +286749,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p16", + "name": "vreinterpret_s64_p16", "arguments": [ "poly16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82477,12 +286774,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p64", + "name": "vreinterpret_s64_p64", "arguments": [ "poly64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82501,12 +286798,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_p8", + "name": "vreinterpret_s64_p8", "arguments": [ "poly8x8_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82526,12 +286823,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s16", + "name": "vreinterpret_s64_s16", "arguments": [ "int16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82551,12 +286848,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s32", + "name": "vreinterpret_s64_s32", "arguments": [ "int32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82576,37 +286873,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s64", - "arguments": [ - "int64x1_t a" - ], - "return_type": { - "value": "uint8x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1D" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_s8", + "name": "vreinterpret_s64_s8", "arguments": [ "int8x8_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { 
"a": { @@ -82626,12 +286898,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u16", + "name": "vreinterpret_s64_u16", "arguments": [ "uint16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82651,12 +286923,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u32", + "name": "vreinterpret_s64_u32", "arguments": [ "uint32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82676,12 +286948,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpret_u8_u64", + "name": "vreinterpret_s64_u64", "arguments": [ "uint64x1_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { @@ -82701,16 +286973,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_f32", + "name": "vreinterpret_s64_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -82726,42 +286998,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p128", + "name": "vreinterpret_s8_f16", "arguments": [ - "poly128_t a" + "float16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -82773,16 +287023,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p16", + "name": "vreinterpret_s8_f32", "arguments": [ - "poly16x8_t a" + "float32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": 
"int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ @@ -82798,20 +287048,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p64", + "name": "vreinterpret_s8_f64", "arguments": [ - "poly64x2_t a" + "float64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -82822,16 +287071,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_p8", + "name": "vreinterpret_s8_p16", "arguments": [ - "poly8x16_t a" + "poly16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ @@ -82847,20 +287096,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s16", + "name": "vreinterpret_s8_p64", "arguments": [ - "int16x8_t a" + "poly64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -82872,16 +287120,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s32", + "name": "vreinterpret_s8_p8", "arguments": [ - "int32x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -82897,16 +287145,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s64", + "name": "vreinterpret_s8_s16", "arguments": [ - "int64x2_t a" + "int16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -82922,16 +287170,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_s8", + "name": "vreinterpret_s8_s32", 
"arguments": [ - "int8x16_t a" + "int32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -82947,16 +287195,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u16", + "name": "vreinterpret_s8_s64", "arguments": [ - "uint16x8_t a" + "int64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ @@ -82972,16 +287220,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u32", + "name": "vreinterpret_s8_u16", "arguments": [ - "uint32x4_t a" + "uint16x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.4H" } }, "Architectures": [ @@ -82997,16 +287245,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u64", + "name": "vreinterpret_s8_u32", "arguments": [ - "uint64x2_t a" + "uint32x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.2S" } }, "Architectures": [ @@ -83022,16 +287270,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f16_u8", + "name": "vreinterpret_s8_u64", "arguments": [ - "uint8x16_t a" + "uint64x1_t a" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.1D" } }, "Architectures": [ @@ -83047,16 +287295,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_f16", + "name": "vreinterpret_s8_u8", "arguments": [ - "float16x8_t a" + "uint8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.8B" } }, "Architectures": [ @@ -83072,39 +287320,16 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_f32_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_p16", + "name": "vreinterpret_u16_f16", "arguments": [ - "poly16x8_t a" + "float16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ @@ -83120,16 +287345,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_p8", + "name": "vreinterpret_u16_f32", "arguments": [ - "poly8x16_t a" + "float32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -83145,21 +287370,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s16", + "name": "vreinterpret_u16_f64", "arguments": [ - "int16x8_t a" + "float64x1_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -83170,16 +287393,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s32", + "name": "vreinterpret_u16_p16", "arguments": [ - "int32x4_t a" + "poly16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.4H" } }, "Architectures": [ @@ -83195,20 +287418,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s64", + "name": "vreinterpret_u16_p64", "arguments": [ - "int64x2_t a" + "poly64x1_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ 
- "v7", "A32", "A64" ], @@ -83220,16 +287442,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_s8", + "name": "vreinterpret_u16_p8", "arguments": [ - "int8x16_t a" + "poly8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8B" } }, "Architectures": [ @@ -83245,16 +287467,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u16", + "name": "vreinterpret_u16_s16", "arguments": [ - "uint16x8_t a" + "int16x4_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ @@ -83270,16 +287492,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u32", + "name": "vreinterpret_u16_s32", "arguments": [ - "uint32x4_t a" + "int32x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2S" } }, "Architectures": [ @@ -83295,16 +287517,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u64", + "name": "vreinterpret_u16_s64", "arguments": [ - "uint64x2_t a" + "int64x1_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ @@ -83320,16 +287542,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f32_u8", + "name": "vreinterpret_u16_s8", "arguments": [ - "uint8x16_t a" + "int8x8_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8B" } }, "Architectures": [ @@ -83345,19 +287567,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_f16", + "name": "vreinterpret_u16_u32", "arguments": [ - "float16x8_t a" + "uint32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83368,19 +287592,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_f32", + "name": "vreinterpret_u16_u64", "arguments": [ - "float32x4_t a" + "uint64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83391,19 +287617,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p128", + "name": "vreinterpret_u16_u8", "arguments": [ - "poly128_t a" + "uint8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83414,19 +287642,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p16", + "name": "vreinterpret_u32_f16", "arguments": [ - "poly16x8_t a" + "float16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83437,19 +287667,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p64", + "name": "vreinterpret_u32_f32", "arguments": [ - "poly64x2_t a" + "float32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83460,16 +287692,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_p8", + "name": "vreinterpret_u32_f64", "arguments": [ - "poly8x16_t a" + "float64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + 
"register": "Vd.1D" } }, "Architectures": [ @@ -83483,19 +287715,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s16", + "name": "vreinterpret_u32_p16", "arguments": [ - "int16x8_t a" + "poly16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83506,19 +287740,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s32", + "name": "vreinterpret_u32_p64", "arguments": [ - "int32x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -83529,19 +287764,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s64", + "name": "vreinterpret_u32_p8", "arguments": [ - "int64x2_t a" + "poly8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83552,19 +287789,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_s8", + "name": "vreinterpret_u32_s16", "arguments": [ - "int8x16_t a" + "int16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83575,19 +287814,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u16", + "name": "vreinterpret_u32_s32", "arguments": [ - "uint16x8_t a" + "int32x2_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83598,19 +287839,21 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u32", + "name": "vreinterpret_u32_s64", "arguments": [ - "uint32x4_t a" + "int64x1_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83621,16 +287864,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u64", + "name": "vreinterpret_u32_s8", "arguments": [ - "uint64x2_t a" + "int8x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" } }, "Architectures": [ @@ -83646,19 +287889,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_f64_u8", + "name": "vreinterpret_u32_u16", "arguments": [ - "uint8x16_t a" + "uint16x4_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83669,19 +287914,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_f16", + "name": "vreinterpret_u32_u64", "arguments": [ - "float16x8_t a" + "uint64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83693,19 +287939,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_f32", + "name": "vreinterpret_u32_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83717,19 +287964,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_f64", + "name": "vreinterpret_u64_f16", "arguments": [ - "float64x2_t a" + "float16x4_t a" ], 
"return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -83740,19 +287989,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_p16", + "name": "vreinterpret_u64_f32", "arguments": [ - "poly16x8_t a" + "float32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83764,20 +288014,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_p8", + "name": "vreinterpret_u64_f64", "arguments": [ - "poly8x16_t a" + "float64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -83788,19 +288037,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s16", + "name": "vreinterpret_u64_p16", "arguments": [ - "int16x8_t a" + "poly16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83812,16 +288062,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s32", + "name": "vreinterpret_u64_p64", "arguments": [ - "int32x4_t a" + "poly64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ @@ -83836,19 +288086,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s64", + "name": "vreinterpret_u64_p8", "arguments": [ - "int64x2_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, 
"Architectures": [ + "v7", "A32", "A64" ], @@ -83860,19 +288111,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_s8", + "name": "vreinterpret_u64_s16", "arguments": [ - "int8x16_t a" + "int16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83884,19 +288136,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u16", + "name": "vreinterpret_u64_s32", "arguments": [ - "uint16x8_t a" + "int32x2_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83908,19 +288161,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u32", + "name": "vreinterpret_u64_s64", "arguments": [ - "uint32x4_t a" + "int64x1_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83932,19 +288186,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u64", + "name": "vreinterpret_u64_s8", "arguments": [ - "uint64x2_t a" + "int8x8_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83956,19 +288211,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p128_u8", + "name": "vreinterpret_u64_u16", "arguments": [ - "uint8x16_t a" + "uint16x4_t a" ], "return_type": { - "value": "poly128_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -83980,16 +288236,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f16", + "name": "vreinterpret_u64_u32", 
"arguments": [ - "float16x8_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" } }, "Architectures": [ @@ -84005,16 +288261,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f32", + "name": "vreinterpret_u64_u8", "arguments": [ - "float32x4_t a" + "uint8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84030,19 +288286,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_f64", + "name": "vreinterpret_u8_f16", "arguments": [ - "float64x2_t a" + "float16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -84053,19 +288311,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p128", + "name": "vreinterpret_u8_f32", "arguments": [ - "poly128_t a" + "float32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84077,20 +288336,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p64", + "name": "vreinterpret_u8_f64", "arguments": [ - "poly64x2_t a" + "float64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -84101,16 +288359,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_p8", + "name": "vreinterpret_u8_p16", "arguments": [ - "poly8x16_t a" + "poly16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + 
"register": "Vd.4H" } }, "Architectures": [ @@ -84126,20 +288384,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s16", + "name": "vreinterpret_u8_p64", "arguments": [ - "int16x8_t a" + "poly64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -84151,16 +288408,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s32", + "name": "vreinterpret_u8_p8", "arguments": [ - "int32x4_t a" + "poly8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84176,16 +288433,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s64", + "name": "vreinterpret_u8_s16", "arguments": [ - "int64x2_t a" + "int16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -84201,16 +288458,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_s8", + "name": "vreinterpret_u8_s32", "arguments": [ - "int8x16_t a" + "int32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -84226,16 +288483,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u16", + "name": "vreinterpret_u8_s64", "arguments": [ - "uint16x8_t a" + "int64x1_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ @@ -84251,16 +288508,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u32", + "name": "vreinterpret_u8_s8", "arguments": [ - "uint32x4_t a" + "int8x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8B" } }, "Architectures": [ @@ -84276,16 +288533,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u64", + "name": "vreinterpret_u8_u16", "arguments": [ - "uint64x2_t a" + "uint16x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4H" } }, "Architectures": [ @@ -84301,16 +288558,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p16_u8", + "name": "vreinterpret_u8_u32", "arguments": [ - "uint8x16_t a" + "uint32x2_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2S" } }, "Architectures": [ @@ -84326,19 +288583,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f16", + "name": "vreinterpret_u8_u64", "arguments": [ - "float16x8_t a" + "uint64x1_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84350,12 +288608,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f32", + "name": "vreinterpretq_f16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { @@ -84363,6 +288621,7 @@ } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84374,12 +288633,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_f64", + "name": "vreinterpretq_f16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { @@ -84397,16 +288656,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_p16", + "name": "vreinterpretq_f16_p128", "arguments": [ - "poly16x8_t a" + "poly128_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.1Q" } }, "Architectures": [ @@ -84421,19 +288680,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_p8", + "name": "vreinterpretq_f16_p16", "arguments": [ - "poly8x16_t a" + "poly16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84445,16 +288705,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s16", + "name": "vreinterpretq_f16_p64", "arguments": [ - "int16x8_t a" + "poly64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -84469,19 +288729,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s32", + "name": "vreinterpretq_f16_p8", "arguments": [ - "int32x4_t a" + "poly8x16_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84493,19 +288754,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s64", + "name": "vreinterpretq_f16_s16", "arguments": [ - "int64x2_t a" + "int16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84517,19 +288779,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_s8", + "name": "vreinterpretq_f16_s32", "arguments": [ - "int8x16_t a" + "int32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84541,19 +288804,20 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_p64_u16", + "name": "vreinterpretq_f16_s64", "arguments": [ - "uint16x8_t a" + "int64x2_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84565,19 +288829,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u32", + "name": "vreinterpretq_f16_s8", "arguments": [ - "uint32x4_t a" + "int8x16_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84589,19 +288854,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u64", + "name": "vreinterpretq_f16_u16", "arguments": [ - "uint64x2_t a" + "uint16x8_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84613,19 +288879,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p64_u8", + "name": "vreinterpretq_f16_u32", "arguments": [ - "uint8x16_t a" + "uint32x4_t a" ], "return_type": { - "value": "poly64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84637,16 +288904,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f16", + "name": "vreinterpretq_f16_u64", "arguments": [ - "float16x8_t a" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -84662,16 +288929,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f32", + "name": "vreinterpretq_f16_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ @@ -84687,19 +288954,21 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_f64", + "name": "vreinterpretq_f32_f16", "arguments": [ - "float64x2_t a" + "float16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -84710,20 +288979,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_p128", + "name": "vreinterpretq_f32_f64", "arguments": [ - "poly128_t a" + "float64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -84734,12 +289002,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_p16", + "name": "vreinterpretq_f32_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84759,19 +289027,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_p64", + "name": "vreinterpretq_f32_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -84783,12 +289052,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s16", + "name": "vreinterpretq_f32_s16", "arguments": [ "int16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84808,12 +289077,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s32", + "name": "vreinterpretq_f32_s32", "arguments": [ "int32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, 
"Arguments_Preparation": { "a": { @@ -84833,12 +289102,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s64", + "name": "vreinterpretq_f32_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84858,12 +289127,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_s8", + "name": "vreinterpretq_f32_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84883,12 +289152,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u16", + "name": "vreinterpretq_f32_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84908,12 +289177,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u32", + "name": "vreinterpretq_f32_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84933,12 +289202,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u64", + "name": "vreinterpretq_f32_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84958,12 +289227,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_p8_u8", + "name": "vreinterpretq_f32_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { @@ -84983,12 +289252,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_f16", + "name": "vreinterpretq_f64_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -84996,8 +289265,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85008,12 +289275,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_s16_f32", + "name": "vreinterpretq_f64_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85021,8 +289288,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85033,16 +289298,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_f64", + "name": "vreinterpretq_f64_p128", "arguments": [ - "float64x2_t a" + "poly128_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ @@ -85056,20 +289321,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p128", + "name": "vreinterpretq_f64_p16", "arguments": [ - "poly128_t a" + "poly16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.1Q" + "register": "Vd.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85080,21 +289344,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p16", + "name": "vreinterpretq_f64_p64", "arguments": [ - "poly16x8_t a" + "poly64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85105,20 +289367,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p64", + "name": "vreinterpretq_f64_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85129,21 +289390,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_p8", + "name": "vreinterpretq_f64_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int16x8_t" + 
"value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85154,12 +289413,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s32", + "name": "vreinterpretq_f64_s32", "arguments": [ "int32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85167,8 +289426,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85179,12 +289436,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s64", + "name": "vreinterpretq_f64_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85192,8 +289449,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85204,12 +289459,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_s8", + "name": "vreinterpretq_f64_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85217,8 +289472,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85229,12 +289482,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u16", + "name": "vreinterpretq_f64_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85242,8 +289495,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85254,12 +289505,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u32", + "name": "vreinterpretq_f64_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85267,8 +289518,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85279,12 +289528,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_s16_u64", + "name": "vreinterpretq_f64_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85304,12 +289553,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s16_u8", + "name": "vreinterpretq_f64_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { @@ -85317,8 +289566,6 @@ } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -85329,12 +289576,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f16", + "name": "vreinterpretq_p128_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85342,7 +289589,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85354,12 +289600,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f32", + "name": "vreinterpretq_p128_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85367,7 +289613,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85379,35 +289624,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_f64", + "name": "vreinterpretq_p128_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85415,7 +289637,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -85426,12 +289647,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p16", + "name": "vreinterpretq_p128_p16", "arguments": [ "poly16x8_t a" ], 
"return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85439,7 +289660,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85451,16 +289671,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p64", + "name": "vreinterpretq_p128_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -85475,20 +289695,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_p8", + "name": "vreinterpretq_p128_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85500,20 +289719,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s16", + "name": "vreinterpretq_p128_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85525,20 +289743,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s64", + "name": "vreinterpretq_p128_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85550,12 +289767,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_s8", + "name": "vreinterpretq_p128_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85563,7 +289780,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85575,12 +289791,12 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vreinterpretq_s32_u16", + "name": "vreinterpretq_p128_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85588,7 +289804,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85600,12 +289815,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u32", + "name": "vreinterpretq_p128_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85613,7 +289828,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85625,20 +289839,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u64", + "name": "vreinterpretq_p128_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85650,12 +289863,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s32_u8", + "name": "vreinterpretq_p128_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int32x4_t" + "value": "poly128_t" }, "Arguments_Preparation": { "a": { @@ -85663,7 +289876,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85675,12 +289887,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f16", + "name": "vreinterpretq_p16_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85700,12 +289912,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f32", + "name": "vreinterpretq_p16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85725,12 +289937,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_f64", + "name": "vreinterpretq_p16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": 
"poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85748,12 +289960,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p128", + "name": "vreinterpretq_p16_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85772,20 +289984,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p16", + "name": "vreinterpretq_p16_p64", "arguments": [ - "poly16x8_t a" + "poly64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -85797,19 +290008,20 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p64", + "name": "vreinterpretq_p16_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -85821,16 +290033,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_p8", + "name": "vreinterpretq_p16_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -85846,16 +290058,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s16", + "name": "vreinterpretq_p16_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -85871,16 +290083,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s32", + "name": "vreinterpretq_p16_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -85896,12 +290108,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_s8", + "name": "vreinterpretq_p16_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85921,12 +290133,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u16", + "name": "vreinterpretq_p16_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85946,12 +290158,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u32", + "name": "vreinterpretq_p16_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85971,12 +290183,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u64", + "name": "vreinterpretq_p16_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -85996,12 +290208,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s64_u8", + "name": "vreinterpretq_p16_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { @@ -86021,12 +290233,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f16", + "name": "vreinterpretq_p64_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86034,7 +290246,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86046,12 +290257,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f32", + "name": "vreinterpretq_p64_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86059,7 +290270,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], 
@@ -86071,12 +290281,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_f64", + "name": "vreinterpretq_p64_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86094,36 +290304,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1Q" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p16", + "name": "vreinterpretq_p64_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86131,7 +290317,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86143,16 +290328,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p64", + "name": "vreinterpretq_p64_p8", "arguments": [ - "poly64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -86167,20 +290352,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_p8", + "name": "vreinterpretq_p64_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86192,20 +290376,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s16", + "name": "vreinterpretq_p64_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86217,20 +290400,19 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s32", + "name": "vreinterpretq_p64_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86242,20 +290424,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_s64", + "name": "vreinterpretq_p64_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86267,12 +290448,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u16", + "name": "vreinterpretq_p64_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86280,7 +290461,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86292,12 +290472,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u32", + "name": "vreinterpretq_p64_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86305,7 +290485,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86317,12 +290496,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u64", + "name": "vreinterpretq_p64_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86330,7 +290509,6 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -86342,12 +290520,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_s8_u8", + "name": "vreinterpretq_p64_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { @@ -86355,7 +290533,6 @@ } }, "Architectures": [ - "v7", "A32", 
"A64" ], @@ -86367,12 +290544,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f16", + "name": "vreinterpretq_p8_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86392,12 +290569,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f32", + "name": "vreinterpretq_p8_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86417,12 +290594,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_f64", + "name": "vreinterpretq_p8_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86440,12 +290617,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p128", + "name": "vreinterpretq_p8_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86464,12 +290641,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p16", + "name": "vreinterpretq_p8_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86489,12 +290666,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p64", + "name": "vreinterpretq_p8_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86513,16 +290690,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_p8", + "name": "vreinterpretq_p8_s16", "arguments": [ - "poly8x16_t a" + "int16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -86538,16 +290715,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s16", + "name": 
"vreinterpretq_p8_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -86563,16 +290740,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s32", + "name": "vreinterpretq_p8_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -86588,16 +290765,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s64", + "name": "vreinterpretq_p8_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -86613,16 +290790,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_s8", + "name": "vreinterpretq_p8_u16", "arguments": [ - "int8x16_t a" + "uint16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -86638,12 +290815,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u32", + "name": "vreinterpretq_p8_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86663,12 +290840,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u64", + "name": "vreinterpretq_p8_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { @@ -86688,12 +290865,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u16_u8", + "name": "vreinterpretq_p8_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16_t" }, 
"Arguments_Preparation": { "a": { @@ -86713,12 +290890,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f16", + "name": "vreinterpretq_s16_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86738,12 +290915,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f32", + "name": "vreinterpretq_s16_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86763,12 +290940,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_f64", + "name": "vreinterpretq_s16_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86786,12 +290963,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p128", + "name": "vreinterpretq_s16_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86810,12 +290987,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p16", + "name": "vreinterpretq_s16_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86835,12 +291012,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p64", + "name": "vreinterpretq_s16_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86859,12 +291036,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_p8", + "name": "vreinterpretq_s16_p8", "arguments": [ "poly8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -86884,16 +291061,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s16", + "name": "vreinterpretq_s16_s32", "arguments": [ - "int16x8_t a" + "int32x4_t a" ], 
"return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -86909,16 +291086,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s32", + "name": "vreinterpretq_s16_s64", "arguments": [ - "int32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -86934,16 +291111,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s64", + "name": "vreinterpretq_s16_s8", "arguments": [ - "int64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.16B" } }, "Architectures": [ @@ -86959,16 +291136,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_s8", + "name": "vreinterpretq_s16_u16", "arguments": [ - "int8x16_t a" + "uint16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.8H" } }, "Architectures": [ @@ -86984,16 +291161,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u16", + "name": "vreinterpretq_s16_u32", "arguments": [ - "uint16x8_t a" + "uint32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.4S" } }, "Architectures": [ @@ -87009,12 +291186,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u64", + "name": "vreinterpretq_s16_u64", "arguments": [ "uint64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -87034,12 +291211,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u32_u8", + "name": "vreinterpretq_s16_u8", "arguments": [ "uint8x16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": 
"int16x8_t" }, "Arguments_Preparation": { "a": { @@ -87059,12 +291236,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f16", + "name": "vreinterpretq_s32_f16", "arguments": [ "float16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87084,12 +291261,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f32", + "name": "vreinterpretq_s32_f32", "arguments": [ "float32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87109,12 +291286,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_f64", + "name": "vreinterpretq_s32_f64", "arguments": [ "float64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87132,12 +291309,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p128", + "name": "vreinterpretq_s32_p128", "arguments": [ "poly128_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87156,12 +291333,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p16", + "name": "vreinterpretq_s32_p16", "arguments": [ "poly16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87181,12 +291358,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p64", + "name": "vreinterpretq_s32_p64", "arguments": [ "poly64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87205,12 +291382,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_p8", + "name": "vreinterpretq_s32_p8", "arguments": [ "poly8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87230,12 +291407,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s16", + "name": "vreinterpretq_s32_s16", "arguments": [ "int16x8_t a" ], 
"return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87255,37 +291432,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s32", - "arguments": [ - "int32x4_t a" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s64", + "name": "vreinterpretq_s32_s64", "arguments": [ "int64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87305,12 +291457,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_s8", + "name": "vreinterpretq_s32_s8", "arguments": [ "int8x16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87330,12 +291482,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u16", + "name": "vreinterpretq_s32_u16", "arguments": [ "uint16x8_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87355,12 +291507,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u32", + "name": "vreinterpretq_s32_u32", "arguments": [ "uint32x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -87380,41 +291532,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u64_u8", - "arguments": [ - "uint8x16_t a" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f16", + "name": "vreinterpretq_s32_u64", "arguments": [ - "float16x8_t a" + "uint64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ @@ -87430,16 +291557,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f32", + "name": "vreinterpretq_s32_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.16B" } }, "Architectures": [ @@ -87455,59 +291582,12 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_f64", - "arguments": [ - "float64x2_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p128", - "arguments": [ - "poly128_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.1Q" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p16", + "name": "vreinterpretq_s64_f16", "arguments": [ - "poly16x8_t a" + "float16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -87527,40 +291607,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p64", - "arguments": [ - "poly64x2_t a" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.2D" - } - }, - "Architectures": [ - "A32", - "A64" - ], - "instructions": [ - [ - "NOP" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_p8", + "name": "vreinterpretq_s64_f32", "arguments": [ - "poly8x16_t a" + "float32x4_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4S" } }, "Architectures": [ @@ -87576,21 +291632,19 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s16", + "name": "vreinterpretq_s64_f64", "arguments": [ - "int16x8_t a" + "float64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -87601,20 +291655,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s32", + "name": "vreinterpretq_s64_p128", "arguments": [ - "int32x4_t a" + "poly128_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -87626,16 +291679,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s64", + "name": "vreinterpretq_s64_p16", "arguments": [ - "int64x2_t a" + "poly16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8H" } }, "Architectures": [ @@ -87651,20 +291704,19 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_s8", + "name": "vreinterpretq_s64_p64", "arguments": [ - "int8x16_t a" + "poly64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -87676,16 +291728,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u16", + "name": "vreinterpretq_s64_p8", "arguments": [ - "uint16x8_t a" + "poly8x16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.16B" } }, "Architectures": [ @@ -87701,16 +291753,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u32", + "name": "vreinterpretq_s64_s16", "arguments": [ - "uint32x4_t a" + "int16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": 
"int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vd.8H" } }, "Architectures": [ @@ -87726,16 +291778,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vreinterpretq_u8_u64", + "name": "vreinterpretq_s64_s32", "arguments": [ - "uint64x2_t a" + "int32x4_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.4S" } }, "Architectures": [ @@ -87751,16 +291803,16 @@ }, { "SIMD_ISA": "Neon", - "name": "vrev16_p8", + "name": "vreinterpretq_s64_s8", "arguments": [ - "poly8x8_t vec" + "int8x16_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -87770,22 +291822,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16_s8", + "name": "vreinterpretq_s64_u16", "arguments": [ - "int8x8_t vec" + "uint16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87795,22 +291847,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16_u8", + "name": "vreinterpretq_s64_u32", "arguments": [ - "uint8x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -87820,22 +291872,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_p8", + "name": "vreinterpretq_s64_u64", "arguments": [ - "poly8x16_t vec" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -87845,22 +291897,22 @@ ], 
"instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_s8", + "name": "vreinterpretq_s64_u8", "arguments": [ - "int8x16_t vec" + "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -87870,22 +291922,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev16q_u8", + "name": "vreinterpretq_s8_f16", "arguments": [ - "uint8x16_t vec" + "float16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87895,22 +291947,22 @@ ], "instructions": [ [ - "REV16" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_p16", + "name": "vreinterpretq_s8_f32", "arguments": [ - "poly16x4_t vec" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -87920,72 +291972,69 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_p8", + "name": "vreinterpretq_s8_f64", "arguments": [ - "poly8x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_s16", + "name": "vreinterpretq_s8_p128", "arguments": [ - "int16x4_t vec" + "poly128_t a" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vrev32_s8", + "name": "vreinterpretq_s8_p16", "arguments": [ - "int8x8_t vec" + "poly16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -87995,47 +292044,46 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_u16", + "name": "vreinterpretq_s8_p64", "arguments": [ - "uint16x4_t vec" + "poly64x2_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32_u8", + "name": "vreinterpretq_s8_p8", "arguments": [ - "uint8x8_t vec" + "poly8x16_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88045,22 +292093,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_p16", + "name": "vreinterpretq_s8_s16", "arguments": [ - "poly16x8_t vec" + "int16x8_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88070,22 +292118,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_p8", + "name": "vreinterpretq_s8_s32", "arguments": [ - "poly8x16_t vec" + "int32x4_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88095,22 +292143,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_s16", + "name": 
"vreinterpretq_s8_s64", "arguments": [ - "int16x8_t vec" + "int64x2_t a" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88120,22 +292168,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_s8", + "name": "vreinterpretq_s8_u16", "arguments": [ - "int8x16_t vec" + "uint16x8_t a" ], "return_type": { "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88145,22 +292193,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_u16", + "name": "vreinterpretq_s8_u32", "arguments": [ - "uint16x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88170,22 +292218,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev32q_u8", + "name": "vreinterpretq_s8_u64", "arguments": [ - "uint8x16_t vec" + "uint64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88195,22 +292243,22 @@ ], "instructions": [ [ - "REV32" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_f16", + "name": "vreinterpretq_s8_u8", "arguments": [ - "float16x4_t vec" + "uint8x16_t a" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88220,22 +292268,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_f32", + "name": "vreinterpretq_u16_f16", "arguments": [ - "float32x2_t vec" + 
"float16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88245,22 +292293,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_p16", + "name": "vreinterpretq_u16_f32", "arguments": [ - "poly16x4_t vec" + "float32x4_t a" ], "return_type": { - "value": "poly16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88270,72 +292318,69 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_p8", + "name": "vreinterpretq_u16_f64", "arguments": [ - "poly8x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s16", + "name": "vreinterpretq_u16_p128", "arguments": [ - "int16x4_t vec" + "poly128_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s32", + "name": "vreinterpretq_u16_p16", "arguments": [ - "int32x2_t vec" + "poly16x8_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88345,47 +292390,46 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_s8", + "name": "vreinterpretq_u16_p64", "arguments": [ - "int8x8_t vec" + "poly64x2_t a" ], "return_type": { - "value": 
"int8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u16", + "name": "vreinterpretq_u16_p8", "arguments": [ - "uint16x4_t vec" + "poly8x16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88395,22 +292439,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u32", + "name": "vreinterpretq_u16_s16", "arguments": [ - "uint32x2_t vec" + "int16x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.2S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88420,22 +292464,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64_u8", + "name": "vreinterpretq_u16_s32", "arguments": [ - "uint8x8_t vec" + "int32x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88445,22 +292489,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_f16", + "name": "vreinterpretq_u16_s64", "arguments": [ - "float16x8_t vec" + "int64x2_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88470,22 +292514,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_f32", + "name": "vreinterpretq_u16_s8", "arguments": [ - "float32x4_t vec" + "int8x16_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint16x8_t" }, 
"Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88495,22 +292539,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_p16", + "name": "vreinterpretq_u16_u32", "arguments": [ - "poly16x8_t vec" + "uint32x4_t a" ], "return_type": { - "value": "poly16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88520,22 +292564,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_p8", + "name": "vreinterpretq_u16_u64", "arguments": [ - "poly8x16_t vec" + "uint64x2_t a" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.2D" } }, "Architectures": [ @@ -88545,22 +292589,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s16", + "name": "vreinterpretq_u16_u8", "arguments": [ - "int16x8_t vec" + "uint8x16_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.16B" } }, "Architectures": [ @@ -88570,22 +292614,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s32", + "name": "vreinterpretq_u32_f16", "arguments": [ - "int32x4_t vec" + "float16x8_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88595,22 +292639,22 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_s8", + "name": "vreinterpretq_u32_f32", "arguments": [ - "int8x16_t vec" + "float32x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - 
"vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.4S" } }, "Architectures": [ @@ -88620,72 +292664,69 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u16", + "name": "vreinterpretq_u32_f64", "arguments": [ - "uint16x8_t vec" + "float64x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.8H" + "a": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u32", + "name": "vreinterpretq_u32_p128", "arguments": [ - "uint32x4_t vec" + "poly128_t a" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.4S" + "a": { + "register": "Vd.1Q" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrev64q_u8", + "name": "vreinterpretq_u32_p16", "arguments": [ - "uint8x16_t vec" + "poly16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "vec": { - "register": "Vn.16B" + "a": { + "register": "Vd.8H" } }, "Architectures": [ @@ -88695,55 +292736,46 @@ ], "instructions": [ [ - "REV64" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s16", + "name": "vreinterpretq_u32_p64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "poly64x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vd.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s32", + "name": "vreinterpretq_u32_p8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "poly8x16_t a" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vd.16B" } }, "Architectures": [ @@ -88753,26 +292785,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_s8", + "name": "vreinterpretq_u32_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int16x8_t a" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vd.8H" } }, "Architectures": [ @@ -88782,26 +292810,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u16", + "name": "vreinterpretq_u32_s32", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int32x4_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" + "register": "Vd.4S" } }, "Architectures": [ @@ -88811,26 +292835,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u32", + "name": "vreinterpretq_u32_s64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int64x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" + "register": "Vd.2D" } }, "Architectures": [ @@ -88840,26 +292860,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhadd_u8", + "name": "vreinterpretq_u32_s8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int8x16_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.8B" + "register": "Vd.16B" } }, "Architectures": [ @@ -88869,26 +292885,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s16", + "name": "vreinterpretq_u32_u16", "arguments": [ - "int16x8_t a", - 
"int16x8_t b" + "uint16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vd.8H" } }, "Architectures": [ @@ -88898,26 +292910,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s32", + "name": "vreinterpretq_u32_u64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint64x2_t a" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vd.2D" } }, "Architectures": [ @@ -88927,26 +292935,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_s8", + "name": "vreinterpretq_u32_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x16_t a" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vd.16B" } }, "Architectures": [ @@ -88956,26 +292960,22 @@ ], "instructions": [ [ - "SRHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_u16", + "name": "vreinterpretq_u64_f16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vd.8H" } }, "Architectures": [ @@ -88985,26 +292985,22 @@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_u32", + "name": "vreinterpretq_u64_f32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vd.4S" } }, "Architectures": [ @@ -89014,350 +293010,368 
@@ ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrhaddq_u8", + "name": "vreinterpretq_u64_f64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "float64x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URHADD" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32x_f32", + "name": "vreinterpretq_u64_p128", "arguments": [ - "float32x2_t a" + "poly128_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.1Q" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32x_f64", + "name": "vreinterpretq_u64_p16", "arguments": [ - "float64x1_t a" + "poly16x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32xq_f32", + "name": "vreinterpretq_u64_p64", "arguments": [ - "float32x4_t a" + "poly64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32xq_f64", + "name": "vreinterpretq_u64_p8", "arguments": [ - "float64x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vrnd32z_f32", + "name": "vreinterpretq_u64_s16", "arguments": [ - "float32x2_t a" + "int16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32z_f64", + "name": "vreinterpretq_u64_s32", "arguments": [ - "float64x1_t a" + "int32x4_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32zq_f32", + "name": "vreinterpretq_u64_s64", "arguments": [ - "float32x4_t a" + "int64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd32zq_f64", + "name": "vreinterpretq_u64_s8", "arguments": [ - "float64x2_t a" + "int8x16_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT32Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64x_f32", + "name": "vreinterpretq_u64_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64x_f64", + "name": "vreinterpretq_u64_u32", "arguments": [ - "float64x1_t a" + "uint32x4_t a" ], 
"return_type": { - "value": "float64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64xq_f32", + "name": "vreinterpretq_u64_u8", "arguments": [ - "float32x4_t a" + "uint8x16_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64xq_f64", + "name": "vreinterpretq_u8_f16", "arguments": [ - "float64x2_t a" + "float16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64X" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64z_f32", + "name": "vreinterpretq_u8_f32", "arguments": [ - "float32x2_t a" + "float32x4_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64z_f64", + "name": "vreinterpretq_u8_f64", "arguments": [ - "float64x1_t a" + "float64x2_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" } }, "Architectures": [ @@ -89365,68 +293379,71 @@ ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64zq_f32", + "name": "vreinterpretq_u8_p128", "arguments": [ - "float32x4_t a" + "poly128_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn" + "register": "Vd.1Q" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd64zq_f64", + "name": "vreinterpretq_u8_p16", "arguments": [ - "float64x2_t a" + "poly16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINT64Z" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f16", + "name": "vreinterpretq_u8_p64", "arguments": [ - "float16x4_t a" + "poly64x2_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.2D" } }, "Architectures": [ @@ -89435,1181 +293452,1256 @@ ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f32", + "name": "vreinterpretq_u8_p8", "arguments": [ - "float32x2_t a" + "poly8x16_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnd_f64", + "name": "vreinterpretq_u8_s16", "arguments": [ - "float64x1_t a" + "int16x8_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnda_f16", + "name": "vreinterpretq_u8_s32", "arguments": [ - "float16x4_t a" + "int32x4_t a" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vrnda_f32", + "name": "vreinterpretq_u8_s64", "arguments": [ - "float32x2_t a" + "int64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrnda_f64", + "name": "vreinterpretq_u8_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndah_f16", + "name": "vreinterpretq_u8_u16", "arguments": [ - "float16_t a" + "uint16x8_t a" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vd.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f16", + "name": "vreinterpretq_u8_u32", "arguments": [ - "float16x8_t a" + "uint32x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f32", + "name": "vreinterpretq_u8_u64", "arguments": [ - "float32x4_t a" + "uint64x2_t a" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.2D" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTA" + "NOP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndaq_f64", + "name": "vrev16_p8", "arguments": [ - "float64x2_t a" + "poly8x8_t vec" ], "return_type": 
{ - "value": "float64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTA" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndh_f16", + "name": "vrev16_s8", "arguments": [ - "float16_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f16", + "name": "vrev16_u8", "arguments": [ - "float16x4_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f32", + "name": "vrev16q_p8", "arguments": [ - "float32x2_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndi_f64", + "name": "vrev16q_s8", "arguments": [ - "float64x1_t a" + "int8x16_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndih_f16", + "name": "vrev16q_u8", "arguments": [ - "float16_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + 
"vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV16" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f16", + "name": "vrev32_p16", "arguments": [ - "float16x8_t a" + "poly16x4_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f32", + "name": "vrev32_p8", "arguments": [ - "float32x4_t a" + "poly8x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndiq_f64", + "name": "vrev32_s16", "arguments": [ - "float64x2_t a" + "int16x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTI" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f16", + "name": "vrev32_s8", "arguments": [ - "float16x4_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f32", + "name": "vrev32_u16", "arguments": [ - "float32x2_t a" + "uint16x4_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - 
"FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndm_f64", + "name": "vrev32_u8", "arguments": [ - "float64x1_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmh_f16", + "name": "vrev32q_p16", "arguments": [ - "float16_t a" + "poly16x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f16", + "name": "vrev32q_p8", "arguments": [ - "float16x8_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f32", + "name": "vrev32q_s16", "arguments": [ - "float32x4_t a" + "int16x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndmq_f64", + "name": "vrev32q_s8", "arguments": [ - "float64x2_t a" + "int8x16_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTM" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f16", + "name": "vrev32q_u16", 
"arguments": [ - "float16x4_t a" + "uint16x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f32", + "name": "vrev32q_u8", "arguments": [ - "float32x2_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV32" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndn_f64", + "name": "vrev64_f16", "arguments": [ - "float64x1_t a" + "float16x4_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnh_f16", + "name": "vrev64_f32", "arguments": [ - "float16_t a" + "float32x2_t vec" ], "return_type": { - "value": "float16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f16", + "name": "vrev64_p16", "arguments": [ - "float16x8_t a" + "poly16x4_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f32", + "name": "vrev64_p8", "arguments": [ - "float32x4_t a" + "poly8x8_t vec" ], "return_type": { - "value": "float32x4_t" + 
"value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndnq_f64", + "name": "vrev64_s16", "arguments": [ - "float64x2_t a" + "int16x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndns_f32", + "name": "vrev64_s32", "arguments": [ - "float32_t a" + "int32x2_t vec" ], "return_type": { - "value": "float32_t" + "value": "int32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Sn" + "vec": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTN" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f16", + "name": "vrev64_s8", "arguments": [ - "float16x4_t a" + "int8x8_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f32", + "name": "vrev64_u16", "arguments": [ - "float32x2_t a" + "uint16x4_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndp_f64", + "name": "vrev64_u32", "arguments": [ - "float64x1_t a" + "uint32x2_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.2S" } 
}, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndph_f16", + "name": "vrev64_u8", "arguments": [ - "float16_t a" + "uint8x8_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f16", + "name": "vrev64q_f16", "arguments": [ - "float16x8_t a" + "float16x8_t vec" ], "return_type": { "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { + "vec": { "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f32", + "name": "vrev64q_f32", "arguments": [ - "float32x4_t a" + "float32x4_t vec" ], "return_type": { "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { + "vec": { "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndpq_f64", + "name": "vrev64q_p16", "arguments": [ - "float64x2_t a" + "poly16x8_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTP" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f16", + "name": "vrev64q_p8", "arguments": [ - "float16x8_t a" + "poly8x16_t vec" ], "return_type": { - "value": "float16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f32", + "name": "vrev64q_s16", "arguments": [ - "float32x4_t 
a" + "int16x8_t vec" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndq_f64", + "name": "vrev64q_s32", "arguments": [ - "float64x2_t a" + "int32x4_t vec" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vec": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTZ" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f16", + "name": "vrev64q_s8", "arguments": [ - "float16x4_t a" + "int8x16_t vec" ], "return_type": { - "value": "float16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f32", + "name": "vrev64q_u16", "arguments": [ - "float32x2_t a" + "uint16x8_t vec" ], "return_type": { - "value": "float32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "vec": { + "register": "Vn.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndx_f64", + "name": "vrev64q_u32", "arguments": [ - "float64x1_t a" + "uint32x4_t vec" ], "return_type": { - "value": "float64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "vec": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxh_f16", + "name": "vrev64q_u8", "arguments": [ - "float16_t a" + "uint8x16_t vec" ], "return_type": { - "value": "float16_t" + "value": "uint8x16_t" }, 
"Arguments_Preparation": { - "a": { - "register": "Hn" + "vec": { + "register": "Vn.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "REV64" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f16", + "name": "vrhadd_s16", "arguments": [ - "float16x8_t a" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f32", + "name": "vrhadd_s32", "arguments": [ - "float32x4_t a" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrndxq_f64", + "name": "vrhadd_s8", "arguments": [ - "float64x2_t a" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FRINTX" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s16", + "name": "vrhadd_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { @@ -90626,19 +294718,19 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s32", + "name": "vrhadd_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, 
"Arguments_Preparation": { "a": { @@ -90655,26 +294747,26 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s64", + "name": "vrhadd_u8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8B" }, "b": { - "register": "Dm" + "register": "Vm.8B" } }, "Architectures": [ @@ -90684,26 +294776,26 @@ ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_s8", + "name": "vrhaddq_s16", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.8H" } }, "Architectures": [ @@ -90713,26 +294805,26 @@ ], "instructions": [ [ - "SRSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u16", + "name": "vrhaddq_s32", "arguments": [ - "uint16x4_t a", - "int16x4_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -90742,26 +294834,26 @@ ], "instructions": [ [ - "URSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u32", + "name": "vrhaddq_s8", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.16B" }, "b": { - "register": "Vm.2S" + "register": "Vm.16B" } }, "Architectures": [ @@ -90771,26 +294863,26 @@ ], "instructions": [ [ - "URSHL" + "SRHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u64", + "name": "vrhaddq_u16", 
"arguments": [ - "uint64x1_t a", - "int64x1_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.8H" }, "b": { - "register": "Dm" + "register": "Vm.8H" } }, "Architectures": [ @@ -90800,26 +294892,26 @@ ], "instructions": [ [ - "URSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshl_u8", + "name": "vrhaddq_u32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.8B" + "register": "Vm.4S" } }, "Architectures": [ @@ -90829,53 +294921,51 @@ ], "instructions": [ [ - "URSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshld_s64", + "name": "vrhaddq_u8", "arguments": [ - "int64_t a", - "int64_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" }, "b": { - "register": "Dm" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRSHL" + "URHADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshld_u64", + "name": "vrnd32x_f32", "arguments": [ - "uint64_t a", - "int64_t b" + "float32x2_t a" ], "return_type": { - "value": "uint64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Vn" } }, "Architectures": [ @@ -90883,527 +294973,415 @@ ], "instructions": [ [ - "URSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s16", + "name": "vrnd32x_f64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float64x1_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + 
"register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s32", + "name": "vrnd32xq_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s64", + "name": "vrnd32xq_f64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_s8", + "name": "vrnd32z_f32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "float32x2_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u16", + "name": "vrnd32z_f64", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "float64x1_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u32", + "name": "vrnd32zq_f32", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": 
"float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u64", + "name": "vrnd32zq_f64", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "float64x2_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT32Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshlq_u8", + "name": "vrnd64x_f32", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "float32x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm.16B" + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHL" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s16", + "name": "vrnd64x_f64", "arguments": [ - "int16x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s32", + "name": "vrnd64xq_f32", "arguments": [ - "int32x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s64", + "name": 
"vrnd64xq_f64", "arguments": [ - "int64x1_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64X" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_s8", + "name": "vrnd64z_f32", "arguments": [ - "int8x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u16", + "name": "vrnd64z_f64", "arguments": [ - "uint16x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u32", + "name": "vrnd64zq_f32", "arguments": [ - "uint32x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u64", + "name": "vrnd64zq_f64", "arguments": [ - "uint64x1_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINT64Z" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshr_n_u8", + "name": "vrnd_f16", "arguments": [ - "uint8x8_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrd_n_s64", + "name": "vrnd_f32", "arguments": [ - "int64_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int64_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrd_n_u64", + "name": "vrnd_f64", "arguments": [ - "uint64_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint64_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 } }, "Architectures": [ @@ -91411,95 +295389,70 @@ ], "instructions": [ [ - "URSHR" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s16", + "name": "vrnda_f16", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Vn.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s32", + "name": "vrnda_f32", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const int n" + "float32x2_t a" ], "return_type": { - 
"value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "register": "Vn.2S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_s64", + "name": "vrnda_f64", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": "Vd.2S" + "register": "Dn" } }, "Architectures": [ @@ -91507,538 +295460,424 @@ ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u16", + "name": "vrndah_f16", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 - }, - "r": { - "register": "Vd.8B" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u32", + "name": "vrndaq_f16", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 - }, - "r": { - "register": "Vd.4H" + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_high_n_u64", + "name": "vrndaq_f32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": 
"uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 - }, - "r": { - "register": "32(Vd)" + "register": "Vn.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "RSHRN2" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s16", + "name": "vrndaq_f64", "arguments": [ - "int16x8_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s32", + "name": "vrndh_f16", "arguments": [ - "int32x4_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_s64", + "name": "vrndi_f16", "arguments": [ - "int64x2_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u16", + "name": "vrndi_f32", "arguments": [ - "uint16x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u32", + "name": "vrndi_f64", "arguments": [ - "uint32x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrn_n_u64", + "name": "vrndih_f16", "arguments": [ - "uint64x2_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "RSHRN" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s16", + "name": "vrndiq_f16", "arguments": [ - "int16x8_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "int16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s32", + "name": "vrndiq_f32", "arguments": [ - "int32x4_t a", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "int32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s64", + "name": "vrndiq_f64", "arguments": [ - "int64x2_t a", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 } }, "Architectures": 
[ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_s8", + "name": "vrndm_f16", "arguments": [ - "int8x16_t a", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u16", + "name": "vrndm_f32", "arguments": [ - "uint16x8_t a", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "uint16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u32", + "name": "vrndm_f64", "arguments": [ - "uint32x4_t a", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u64", + "name": "vrndmh_f16", "arguments": [ - "uint64x2_t a", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrshrq_n_u8", + "name": "vrndmq_f16", "arguments": [ - "uint8x16_t a", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x8_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSHR" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f16", + "name": "vrndmq_f32", "arguments": [ - "float16x4_t a" + "float32x4_t a" ], "return_type": { - "value": "float16x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" } }, "Architectures": [ @@ -92047,66 +295886,65 @@ ], "instructions": [ [ - "FRSQRTE" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f32", + "name": "vrndmq_f64", "arguments": [ - "float32x2_t a" + "float64x2_t a" ], "return_type": { - "value": "float32x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTM" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_f64", + "name": "vrndn_f16", "arguments": [ - "float64x1_t a" + "float16x4_t a" ], "return_type": { - "value": "float64x1_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.4H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrte_u32", + "name": "vrndn_f32", "arguments": [ - "uint32x2_t a" + "float32x2_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { @@ -92114,24 +295952,23 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrted_f64", + "name": "vrndn_f64", "arguments": [ - "float64_t a" + "float64x1_t a" ], "return_type": { - "value": "float64_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { @@ -92139,17 +295976,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - 
"FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteh_f16", + "name": "vrndnh_f16", "arguments": [ "float16_t a" ], @@ -92162,17 +296000,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f16", + "name": "vrndnq_f16", "arguments": [ "float16x8_t a" ], @@ -92190,13 +296029,13 @@ ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f32", + "name": "vrndnq_f32", "arguments": [ "float32x4_t a" ], @@ -92209,19 +296048,18 @@ } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrteq_f64", + "name": "vrndnq_f64", "arguments": [ "float64x2_t a" ], @@ -92234,42 +296072,18 @@ } }, "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FRSQRTE" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vrsqrteq_u32", - "arguments": [ - "uint32x4_t a" - ], - "return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - } - }, - "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtes_f32", + "name": "vrndns_f32", "arguments": [ "float32_t a" ], @@ -92282,20 +296096,20 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTE" + "FRINTN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f16", + "name": "vrndp_f16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "float16x4_t a" ], "return_type": { "value": "float16x4_t" @@ -92303,9 +296117,6 @@ "Arguments_Preparation": { "a": { "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" } }, "Architectures": [ @@ -92314,16 +296125,15 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f32", + "name": "vrndp_f32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "float32x2_t a" ], "return_type": { "value": "float32x2_t" 
@@ -92331,28 +296141,23 @@ "Arguments_Preparation": { "a": { "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrts_f64", + "name": "vrndp_f64", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "float64x1_t a" ], "return_type": { "value": "float64x1_t" @@ -92360,9 +296165,6 @@ "Arguments_Preparation": { "a": { "register": "Dn" - }, - "b": { - "register": "Dm" } }, "Architectures": [ @@ -92370,80 +296172,70 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsd_f64", + "name": "vrndph_f16", "arguments": [ - "float64_t a", - "float64_t b" + "float16_t a" ], "return_type": { - "value": "float64_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" - }, - "b": { - "register": "Dm" + "register": "Hn" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsh_f16", + "name": "vrndpq_f16", "arguments": [ - "float16_t a", - "float16_t b" + "float16x8_t a" ], "return_type": { - "value": "float16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" - }, - "b": { - "register": "Hm" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f16", + "name": "vrndpq_f32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "float32x4_t a" ], "return_type": { - "value": "float16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" + "register": "Vn.4S" } }, "Architectures": [ @@ -92452,450 +296244,344 @@ ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f32", + "name": "vrndpq_f64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "float64x2_t a" 
], "return_type": { - "value": "float32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTP" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtsq_f64", + "name": "vrndq_f16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "float16x8_t a" ], "return_type": { - "value": "float64x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" + "register": "Vn.8H" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsqrtss_f32", + "name": "vrndq_f32", "arguments": [ - "float32_t a", - "float32_t b" + "float32x4_t a" ], "return_type": { - "value": "float32_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Sn" - }, - "b": { - "register": "Sm" + "register": "Vn.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "FRSQRTS" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s16", + "name": "vrndq_f64", "arguments": [ - "int16x4_t a", - "int16x4_t b", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "int16x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTZ" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s32", + "name": "vrndx_f16", "arguments": [ - "int32x2_t a", - "int32x2_t b", - "const int n" + "float16x4_t a" ], "return_type": { - "value": "int32x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.4H" } }, 
"Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s64", + "name": "vrndx_f32", "arguments": [ - "int64x1_t a", - "int64x1_t b", - "const int n" + "float32x2_t a" ], "return_type": { - "value": "int64x1_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_s8", + "name": "vrndx_f64", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "const int n" + "float64x1_t a" ], "return_type": { - "value": "int8x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u16", + "name": "vrndxh_f16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", - "const int n" + "float16_t a" ], "return_type": { - "value": "uint16x4_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Hn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u32", + "name": "vrndxq_f16", "arguments": [ - "uint32x2_t a", - "uint32x2_t b", - "const int n" + "float16x8_t a" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vn.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, 
{ "SIMD_ISA": "Neon", - "name": "vrsra_n_u64", + "name": "vrndxq_f32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", - "const int n" + "float32x4_t a" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsra_n_u8", + "name": "vrndxq_f64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "const int n" + "float64x2_t a" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "FRINTX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsrad_n_s64", + "name": "vrshl_s16", "arguments": [ - "int64_t a", - "int64_t b", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4H" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsrad_n_u64", + "name": "vrshl_s32", "arguments": [ - "uint64_t a", - "uint64_t b", - "const int n" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "URSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vrsraq_n_s16", + "name": "vrshl_s64", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "const int n" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dm" } }, "Architectures": [ @@ -92905,31 +296591,26 @@ ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s32", + "name": "vrshl_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "const int n" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vm.8B" } }, "Architectures": [ @@ -92939,31 +296620,26 @@ ], "instructions": [ [ - "SRSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s64", + "name": "vrshl_u16", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "const int n" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.4H" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Vm.4H" } }, "Architectures": [ @@ -92973,31 +296649,26 @@ ], "instructions": [ [ - "SRSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_s8", + "name": "vrshl_u32", "arguments": [ - "int8x16_t a", - "int8x16_t b", - "const int n" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2S" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Vm.2S" } }, "Architectures": [ @@ -93007,31 +296678,26 @@ ], "instructions": 
[ [ - "SRSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u16", + "name": "vrshl_u64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", - "const int n" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 1, - "maximum": 16 + "register": "Dm" } }, "Architectures": [ @@ -93041,31 +296707,26 @@ ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u32", + "name": "vrshl_u8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "const int n" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 32 + "register": "Vm.8B" } }, "Architectures": [ @@ -93075,88 +296736,73 @@ ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u64", + "name": "vrshld_s64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int n" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 1, - "maximum": 64 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsraq_n_u8", + "name": "vrshld_u64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "const int n" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Dn" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 1, - "maximum": 8 + "register": "Dm" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "URSRA" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s16", + "name": "vrshlq_s16", "arguments": [ - "int8x8_t r", "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -93164,30 +296810,28 @@ }, "b": { "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s32", + "name": "vrshlq_s32", "arguments": [ - "int16x4_t r", "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -93195,30 +296839,28 @@ }, "b": { "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_s64", + "name": "vrshlq_s64", "arguments": [ - "int32x2_t r", "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -93226,129 +296868,122 @@ }, "b": { "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u16", + "name": "vrshlq_s8", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" - }, - "r": { - "register": "Vd.8B" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "SRSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u32", + "name": "vrshlq_u16", "arguments": [ - 
"uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "b": { - "register": "Vm.4S" - }, - "r": { - "register": "Vd.4H" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_high_u64", + "name": "vrshlq_u32", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" - }, - "r": { - "register": "Vd.2S" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "RSUBHN2" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s16", + "name": "vrshlq_u64", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -93358,26 +296993,26 @@ ], "instructions": [ [ - "RSUBHN" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s32", + "name": "vrshlq_u8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.16B" } }, "Architectures": [ @@ -93387,26 +297022,27 @@ ], "instructions": [ [ - "RSUBHN" + "URSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_s64", + "name": "vrshr_n_s16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "int32x2_t" + 
"value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4H" }, - "b": { - "register": "Vm.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93416,26 +297052,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_u16", + "name": "vrshr_n_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, - "b": { - "register": "Vm.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -93445,26 +297082,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_u32", + "name": "vrshr_n_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int64x1_t a", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Dn" }, - "b": { - "register": "Vm.4S" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -93474,26 +297112,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vrsubhn_u64", + "name": "vrshr_n_s8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int8x8_t a", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8B" }, - "b": { - "register": "Vm.2D" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93503,31 +297142,27 @@ ], "instructions": [ [ - "RSUBHN" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f16", + "name": "vrshr_n_u16", "arguments": [ - "float16_t a", - "float16x4_t v", - "const int lane" + "uint16x4_t a", + "const int n" ], "return_type": { - "value": "float16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - 
"register": "VnH" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.4H" }, - "v": { - "register": "Vd.4H" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93537,31 +297172,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f32", + "name": "vrshr_n_u32", "arguments": [ - "float32_t a", - "float32x2_t v", - "const int lane" + "uint32x2_t a", + "const int n" ], "return_type": { - "value": "float32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.2S" }, - "v": { - "register": "Vd.2S" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -93571,63 +297202,57 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_f64", + "name": "vrshr_n_u64", "arguments": [ - "float64_t a", - "float64x1_t v", - "const int lane" + "uint64x1_t a", + "const int n" ], "return_type": { - "value": "float64x1_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Dn" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p16", + "name": "vrshr_n_u8", "arguments": [ - "poly16_t a", - "poly16x4_t v", - "const int lane" + "uint8x8_t a", + "const int n" ], "return_type": { - "value": "poly16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8B" }, - "v": { - "register": "Vd.4H" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93637,302 +297262,275 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p64", + "name": "vrshrd_n_s64", "arguments": [ - "poly64_t a", - 
"poly64x1_t v", - "const int lane" + "int64_t a", + "const int n" ], "return_type": { - "value": "poly64x1_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Dn" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_p8", + "name": "vrshrd_n_u64", "arguments": [ - "poly8_t a", - "poly8x8_t v", - "const int lane" + "uint64_t a", + "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Dn" }, - "v": { - "register": "Vd.8B" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s16", + "name": "vrshrn_high_n_s16", "arguments": [ - "int16_t a", - "int16x4_t v", - "const int lane" + "int8x8_t r", + "int16x8_t a", + "const int n" ], "return_type": { - "value": "int16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 3 + "n": { + "minimum": 1, + "maximum": 8 }, - "v": { - "register": "Vd.4H" + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s32", + "name": "vrshrn_high_n_s32", "arguments": [ - "int32_t a", - "int32x2_t v", - "const int lane" + "int16x4_t r", + "int32x4_t a", + "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 1 + "n": { + "minimum": 1, + "maximum": 16 }, - "v": { - "register": "Vd.2S" + "r": 
{ + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s64", + "name": "vrshrn_high_n_s64", "arguments": [ - "int64_t a", - "int64x1_t v", - "const int lane" + "int32x2_t r", + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.2D" }, - "lane": { - "minimum": 0, - "maximum": 0 + "n": { + "minimum": 1, + "maximum": 32 }, - "v": { - "register": "Vd.1D" + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_s8", + "name": "vrshrn_high_n_u16", "arguments": [ - "int8_t a", - "int8x8_t v", - "const int lane" + "uint8x8_t r", + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.8H" }, - "lane": { - "minimum": 0, - "maximum": 7 + "n": { + "minimum": 1, + "maximum": 8 }, - "v": { + "r": { "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u16", + "name": "vrshrn_high_n_u32", "arguments": [ - "uint16_t a", - "uint16x4_t v", - "const int lane" + "uint16x4_t r", + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.4S" }, - "lane": { - "minimum": 0, - "maximum": 3 + "n": { + "minimum": 1, + "maximum": 16 }, - "v": { + "r": { "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u32", + "name": "vrshrn_high_n_u64", "arguments": [ - "uint32_t a", - "uint32x2_t v", - "const int lane" + 
"uint32x2_t r", + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" + "register": "Vn.2D" }, - "lane": { - "minimum": 0, - "maximum": 1 + "n": { + "minimum": 1, + "maximum": 32 }, - "v": { - "register": "Vd.2S" + "r": { + "register": "32(Vd)" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u64", + "name": "vrshrn_n_s16", "arguments": [ - "uint64_t a", - "uint64x1_t v", - "const int lane" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 0 + "register": "Vn.8H" }, - "v": { - "register": "Vd.1D" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -93942,31 +297540,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vset_lane_u8", + "name": "vrshrn_n_s32", "arguments": [ - "uint8_t a", - "uint8x8_t v", - "const int lane" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.4S" }, - "v": { - "register": "Vd.8B" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -93976,31 +297570,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f16", + "name": "vrshrn_n_s64", "arguments": [ - "float16_t a", - "float16x8_t v", - "const int lane" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "float16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "VnH" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ 
@@ -94010,31 +297600,27 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f32", + "name": "vrshrn_n_u16", "arguments": [ - "float32_t a", - "float32x4_t v", - "const int lane" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "float32x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.8H" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94044,63 +297630,57 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_f64", + "name": "vrshrn_n_u32", "arguments": [ - "float64_t a", - "float64x2_t v", - "const int lane" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "float64x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.4S" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p16", + "name": "vrshrn_n_u64", "arguments": [ - "poly16_t a", - "poly16x8_t v", - "const int lane" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94110,64 +297690,57 @@ ], "instructions": [ [ - "MOV" + "RSHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p64", + "name": "vrshrq_n_s16", "arguments": [ - "poly64_t a", - "poly64x2_t v", - "const int lane" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "poly64x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - 
"register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.8H" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_p8", + "name": "vrshrq_n_s32", "arguments": [ - "poly8_t a", - "poly8x16_t v", - "const int lane" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 + "register": "Vn.4S" }, - "v": { - "register": "Vd.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94177,31 +297750,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s16", + "name": "vrshrq_n_s64", "arguments": [ - "int16_t a", - "int16x8_t v", - "const int lane" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -94211,31 +297780,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s32", + "name": "vrshrq_n_s8", "arguments": [ - "int32_t a", - "int32x4_t v", - "const int lane" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.16B" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94245,31 +297810,27 @@ ], "instructions": [ [ - "MOV" + "SRSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s64", + "name": "vrshrq_n_u16", "arguments": [ - "int64_t a", - "int64x2_t 
v", - "const int lane" + "uint16x8_t a", + "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 + "register": "Vn.8H" }, - "v": { - "register": "Vd.2D" + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -94279,31 +297840,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_s8", + "name": "vrshrq_n_u32", "arguments": [ - "int8_t a", - "int8x16_t v", - "const int lane" + "uint32x4_t a", + "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 + "register": "Vn.4S" }, - "v": { - "register": "Vd.16B" + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -94313,31 +297870,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u16", + "name": "vrshrq_n_u64", "arguments": [ - "uint16_t a", - "uint16x8_t v", - "const int lane" + "uint64x2_t a", + "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 7 + "register": "Vn.2D" }, - "v": { - "register": "Vd.8H" + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -94347,31 +297900,27 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u32", + "name": "vrshrq_n_u8", "arguments": [ - "uint32_t a", - "uint32x4_t v", - "const int lane" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 3 + "register": "Vn.16B" }, - "v": { - "register": "Vd.4S" + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -94381,65 
+297930,46 @@ ], "instructions": [ [ - "MOV" + "URSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u64", + "name": "vrsqrte_f16", "arguments": [ - "uint64_t a", - "uint64x2_t v", - "const int lane" + "float16x4_t a" ], "return_type": { - "value": "uint64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "v": { - "register": "Vd.2D" + "register": "Vn.4H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MOV" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsetq_lane_u8", + "name": "vrsqrte_f32", "arguments": [ - "uint8_t a", - "uint8x16_t v", - "const int lane" + "float32x2_t a" ], "return_type": { - "value": "uint8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Rn" - }, - "lane": { - "minimum": 0, - "maximum": 15 - }, - "v": { - "register": "Vd.16B" + "register": "Vn.2S" } }, "Architectures": [ @@ -94449,150 +297979,116 @@ ], "instructions": [ [ - "MOV" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1cq_u32", + "name": "vrsqrte_f64", "arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float64x1_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1C" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1h_u32", + "name": "vrsqrte_u32", "arguments": [ - "uint32_t hash_e" + "uint32x2_t a" ], "return_type": { - "value": "uint32_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "hash_e": { - "register": "Sn" + "a": { + "register": "Vn.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA1H" + "URSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1mq_u32", + "name": "vrsqrted_f64", 
"arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float64_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Dn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1M" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1pq_u32", + "name": "vrsqrteh_f16", "arguments": [ - "uint32x4_t hash_abcd", - "uint32_t hash_e", - "uint32x4_t wk" + "float16_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_e": { - "register": "Sn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Hn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA1P" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1su0q_u32", + "name": "vrsqrteq_f16", "arguments": [ - "uint32x4_t w0_3", - "uint32x4_t w4_7", - "uint32x4_t w8_11" + "float16x8_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "w0_3": { - "register": "Vd.4S" - }, - "w4_7": { - "register": "Vn.4S" - }, - "w8_11": { - "register": "Vm.4S" + "a": { + "register": "Vn.8H" } }, "Architectures": [ @@ -94601,150 +298097,122 @@ ], "instructions": [ [ - "SHA1SU0" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha1su1q_u32", + "name": "vrsqrteq_f32", "arguments": [ - "uint32x4_t tw0_3", - "uint32x4_t w12_15" + "float32x4_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "tw0_3": { - "register": "Vd.4S" - }, - "w12_15": { + "a": { "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA1SU1" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256h2q_u32", + "name": "vrsqrteq_f64", "arguments": [ - "uint32x4_t hash_efgh", - 
"uint32x4_t hash_abcd", - "uint32x4_t wk" + "float64x2_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qn" - }, - "hash_efgh": { - "register": "Qd" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA256H2" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256hq_u32", + "name": "vrsqrteq_u32", "arguments": [ - "uint32x4_t hash_abcd", - "uint32x4_t hash_efgh", - "uint32x4_t wk" + "uint32x4_t a" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "hash_abcd": { - "register": "Qd" - }, - "hash_efgh": { - "register": "Qn" - }, - "wk": { - "register": "Vm.4S" + "a": { + "register": "Vn.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SHA256H" + "URSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256su0q_u32", + "name": "vrsqrtes_f32", "arguments": [ - "uint32x4_t w0_3", - "uint32x4_t w4_7" + "float32_t a" ], "return_type": { - "value": "uint32x4_t" + "value": "float32_t" }, "Arguments_Preparation": { - "w0_3": { - "register": "Vd.4S" - }, - "w4_7": { - "register": "Vn.4S" + "a": { + "register": "Sn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SHA256SU0" + "FRSQRTE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha256su1q_u32", + "name": "vrsqrts_f16", "arguments": [ - "uint32x4_t tw0_3", - "uint32x4_t w8_11", - "uint32x4_t w12_15" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "tw0_3": { - "register": "Vd.4S" - }, - "w12_15": { - "register": "Vm.4S" + "a": { + "register": "Vn.4H" }, - "w8_11": { - "register": "Vn.4S" + "b": { + "register": "Vm.4H" } }, "Architectures": [ @@ -94753,84 +298221,82 @@ ], "instructions": [ [ - "SHA256SU1" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512h2q_u64", + "name": "vrsqrts_f32", "arguments": [ 
- "uint64x2_t sum_ab", - "uint64x2_t hash_c_", - "uint64x2_t hash_ab" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "hash_ab": {}, - "hash_c_": { - "register": "Qn" + "a": { + "register": "Vn.2S" }, - "sum_ab": { - "register": "Qd" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHA512H2" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512hq_u64", + "name": "vrsqrts_f64", "arguments": [ - "uint64x2_t hash_ed", - "uint64x2_t hash_gf", - "uint64x2_t kwh_kwh2" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "hash_ed": { - "register": "Qd" - }, - "hash_gf": { - "register": "Qn" + "a": { + "register": "Dn" }, - "kwh_kwh2": {} + "b": { + "register": "Dm" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SHA512H" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512su0q_u64", + "name": "vrsqrtsd_f64", "arguments": [ - "uint64x2_t w0_1", - "uint64x2_t w2_" + "float64_t a", + "float64_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float64_t" }, "Arguments_Preparation": { - "w0_1": { - "register": "Vd.2D" + "a": { + "register": "Dn" }, - "w2_": { - "register": "Vn.2D" + "b": { + "register": "Dm" } }, "Architectures": [ @@ -94838,86 +298304,81 @@ ], "instructions": [ [ - "SHA512SU0" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsha512su1q_u64", + "name": "vrsqrtsh_f16", "arguments": [ - "uint64x2_t s01_s02", - "uint64x2_t w14_15", - "uint64x2_t w9_10" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "float16_t" }, "Arguments_Preparation": { - "s01_s02": { - "register": "Vd.2D" - }, - "w14_15": { - "register": "Vn.2D" + "a": { + "register": "Hn" }, - "w9_10": {} + "b": { + "register": "Hm" + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SHA512SU1" + 
"FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s16", + "name": "vrsqrtsq_f16", "arguments": [ - "int16x4_t a", - "const int n" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16x4_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.8H" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s32", + "name": "vrsqrtsq_f32", "arguments": [ - "int32x2_t a", - "const int n" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -94927,87 +298388,85 @@ ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s64", + "name": "vrsqrtsq_f64", "arguments": [ - "int64x1_t a", - "const int n" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int64x1_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_s8", + "name": "vrsqrtss_f32", "arguments": [ - "int8x8_t a", - "const int n" + "float32_t a", + "float32_t b" ], "return_type": { - "value": "int8x8_t" + "value": "float32_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Sn" }, - "n": { - "minimum": 0, - "maximum": 7 + "b": { + "register": "Sm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FRSQRTS" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u16", + "name": "vrsra_n_s16", "arguments": [ - 
"uint16x4_t a", + "int16x4_t a", + "int16x4_t b", "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.4H" + }, + "b": { "register": "Vn.4H" }, "n": { - "minimum": 0, - "maximum": 15 + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95017,27 +298476,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u32", + "name": "vrsra_n_s32", "arguments": [ - "uint32x2_t a", + "int32x2_t a", + "int32x2_t b", "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.2S" + }, + "b": { "register": "Vn.2S" }, "n": { - "minimum": 0, - "maximum": 31 + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95047,27 +298510,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u64", + "name": "vrsra_n_s64", "arguments": [ - "uint64x1_t a", + "int64x1_t a", + "int64x1_t b", "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { + "register": "Dd" + }, + "b": { "register": "Dn" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -95077,27 +298544,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_n_u8", + "name": "vrsra_n_s8", "arguments": [ - "uint8x8_t a", + "int8x8_t a", + "int8x8_t b", "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.8B" + }, + "b": { "register": "Vn.8B" }, "n": { - "minimum": 0, - "maximum": 7 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -95107,26 +298578,31 @@ ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s16", + "name": "vrsra_n_u16", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint16x4_t a", + "uint16x4_t b", + "const 
int n" ], "return_type": { - "value": "int16x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vd.4H" }, "b": { - "register": "Vm.4H" + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95136,26 +298612,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s32", + "name": "vrsra_n_u32", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vd.2S" }, "b": { - "register": "Vm.2S" + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95165,26 +298646,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s64", + "name": "vrsra_n_u64", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Dd" }, "b": { - "register": "Dm" + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -95194,26 +298680,31 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_s8", + "name": "vrsra_n_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8B" }, "b": { - "register": "Vm.8B" + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -95223,84 +298714,95 @@ ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u16", + "name": "vrsrad_n_s64", "arguments": [ - "uint16x4_t a", - 
"int16x4_t b" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dd" }, "b": { - "register": "Vm.4H" + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u32", + "name": "vrsrad_n_u64", "arguments": [ - "uint32x2_t a", - "int32x2_t b" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dd" }, "b": { - "register": "Vm.2S" + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u64", + "name": "vrsraq_n_s16", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - "register": "Dm" + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -95310,26 +298812,31 @@ ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshl_u8", + "name": "vrsraq_n_s32", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.4S" }, "b": { - "register": "Vm.8B" + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -95339,193 +298846,234 @@ ], "instructions": [ [ - "USHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_n_s64", + "name": "vrsraq_n_s64", 
"arguments": [ - "int64_t a", + "int64x2_t a", + "int64x2_t b", "const int n" ], "return_type": { - "value": "int64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_n_u64", + "name": "vrsraq_n_s8", "arguments": [ - "uint64_t a", + "int8x16_t a", + "int8x16_t b", "const int n" ], "return_type": { - "value": "uint64_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" }, "n": { - "minimum": 0, - "maximum": 63 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHL" + "SRSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_s64", + "name": "vrsraq_n_u16", "arguments": [ - "int64_t a", - "int64_t b" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "int64_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8H" }, "b": { - "register": "Dm" + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshld_u64", + "name": "vrsraq_n_u32", "arguments": [ - "uint64_t a", - "int64_t b" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.4S" }, "b": { - "register": "Dm" + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USHL" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s16", + "name": 
"vrsraq_n_u64", "arguments": [ - "int16x8_t a", + "uint64x2_t a", + "uint64x2_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" }, "n": { - "minimum": 0, - "maximum": 16 + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s32", + "name": "vrsraq_n_u8", "arguments": [ - "int32x4_t a", + "uint8x16_t a", + "uint8x16_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" }, "n": { - "minimum": 0, - "maximum": 32 + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHLL2" + "URSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_s8", + "name": "vrsubhn_high_s16", "arguments": [ - "int8x16_t a", - "const int n" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -95533,27 +299081,30 @@ ], "instructions": [ [ - "SSHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u16", + "name": "vrsubhn_high_s32", "arguments": [ - "uint16x8_t a", - "const int n" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -95561,27 +299112,30 
@@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u32", + "name": "vrsubhn_high_s64", "arguments": [ - "uint32x4_t a", - "const int n" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -95589,27 +299143,30 @@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_high_n_u8", + "name": "vrsubhn_high_u16", "arguments": [ - "uint8x16_t a", - "const int n" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -95617,87 +299174,88 @@ ], "instructions": [ [ - "USHLL2" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s16", + "name": "vrsubhn_high_u32", "arguments": [ - "int16x4_t a", - "const int n" + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHLL" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s32", + "name": "vrsubhn_high_u64", "arguments": [ - "int32x2_t a", - "const int n" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": 
"Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHLL" + "RSUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_s8", + "name": "vrsubhn_s16", "arguments": [ - "int8x8_t a", - "const int n" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + "b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -95707,27 +299265,26 @@ ], "instructions": [ [ - "SSHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u16", + "name": "vrsubhn_s32", "arguments": [ - "uint16x4_t a", - "const int n" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 16 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -95737,27 +299294,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u32", + "name": "vrsubhn_s64", "arguments": [ - "uint32x2_t a", - "const int n" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 32 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -95767,27 +299323,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshll_n_u8", + "name": "vrsubhn_u16", "arguments": [ - "uint8x8_t a", - "const int n" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, - "n": { - "minimum": 0, - "maximum": 8 + 
"b": { + "register": "Vm.8H" } }, "Architectures": [ @@ -95797,27 +299352,26 @@ ], "instructions": [ [ - "USHLL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s16", + "name": "vrsubhn_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, - "n": { - "minimum": 0, - "maximum": 15 + "b": { + "register": "Vm.4S" } }, "Architectures": [ @@ -95827,27 +299381,26 @@ ], "instructions": [ [ - "SHL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s32", + "name": "vrsubhn_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 31 + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -95857,177 +299410,166 @@ ], "instructions": [ [ - "SHL" + "RSUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s64", + "name": "vscale_f16", "arguments": [ - "int64x2_t a", - "const int n" + "float16x4_t vn", + "int16x4_t vm" ], "return_type": { - "value": "int64x2_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vm": { + "register": "Vm.4H" }, - "n": { - "minimum": 0, - "maximum": 63 + "vn": { + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_s8", + "name": "vscale_f32", "arguments": [ - "int8x16_t a", - "const int n" + "float32x2_t vn", + "int32x2_t vm" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "vm": { + "register": "Vm.2S" }, - "n": { - "minimum": 0, - "maximum": 7 + "vn": { + "register": "Vn.2S" } }, "Architectures": [ - 
"v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u16", + "name": "vscaleq_f16", "arguments": [ - "uint16x8_t a", - "const int n" + "float16x8_t vn", + "int16x8_t vm" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "vm": { + "register": "Vm.8H" }, - "n": { - "minimum": 0, - "maximum": 15 + "vn": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u32", + "name": "vscaleq_f32", "arguments": [ - "uint32x4_t a", - "const int n" + "float32x4_t vn", + "int32x4_t vm" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "vm": { + "register": "Vm.4S" }, - "n": { - "minimum": 0, - "maximum": 31 + "vn": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u64", + "name": "vscaleq_f64", "arguments": [ - "uint64x2_t a", - "const int n" + "float64x2_t vn", + "int64x2_t vm" ], "return_type": { - "value": "uint64x2_t" + "value": "float64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "vm": { + "register": "Vm.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "vn": { + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SHL" + "FSCALE" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_n_u8", + "name": "vset_lane_f16", "arguments": [ - "uint8x16_t a", - "const int n" + "float16_t a", + "float16x4_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "VnH" }, - "n": { + "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ 
@@ -96037,26 +299579,31 @@ ], "instructions": [ [ - "SHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s16", + "name": "vset_lane_f32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float32_t a", + "float32x2_t v", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96066,55 +299613,63 @@ ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s32", + "name": "vset_lane_f64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float64_t a", + "float64x1_t v", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "float64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s64", + "name": "vset_lane_p16", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "poly16_t a", + "poly16x4_t v", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "b": { - "register": "Vm.2D" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96124,55 +299679,64 @@ ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_s8", + "name": "vset_lane_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64_t a", + "poly64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Rn" }, - "b": { - 
"register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u16", + "name": "vset_lane_p8", "arguments": [ - "uint16x8_t a", - "int16x8_t b" + "poly8_t a", + "poly8x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "b": { - "register": "Vm.8H" + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96182,26 +299746,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u32", + "name": "vset_lane_s16", "arguments": [ - "uint32x4_t a", - "int32x4_t b" + "int16_t a", + "int16x4_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "b": { - "register": "Vm.4S" + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96211,26 +299780,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u64", + "name": "vset_lane_s32", "arguments": [ - "uint64x2_t a", - "int64x2_t b" + "int32_t a", + "int32x2_t v", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "b": { - "register": "Vm.2D" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96240,26 +299814,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshlq_u8", + "name": "vset_lane_s64", "arguments": [ - "uint8x16_t a", - "int8x16_t b" + "int64_t a", + "int64x1_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + 
"value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Rn" }, - "b": { - "register": "Vm.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ @@ -96269,27 +299848,31 @@ ], "instructions": [ [ - "USHL" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s16", + "name": "vset_lane_s8", "arguments": [ - "int16x4_t a", - "const int n" + "int8_t a", + "int8x8_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96299,27 +299882,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s32", + "name": "vset_lane_u16", "arguments": [ - "int32x2_t a", - "const int n" + "uint16_t a", + "uint16x4_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4H" } }, "Architectures": [ @@ -96329,27 +299916,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s64", + "name": "vset_lane_u32", "arguments": [ - "int64x1_t a", - "const int n" + "uint32_t a", + "uint32x2_t v", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2S" } }, "Architectures": [ @@ -96359,27 +299950,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_s8", + "name": "vset_lane_u64", 
"arguments": [ - "int8x8_t a", - "const int n" + "uint64_t a", + "uint64x1_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 0 + }, + "v": { + "register": "Vd.1D" } }, "Architectures": [ @@ -96389,27 +299984,31 @@ ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u16", + "name": "vset_lane_u8", "arguments": [ - "uint16x4_t a", - "const int n" + "uint8_t a", + "uint8x8_t v", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8B" } }, "Architectures": [ @@ -96419,27 +300018,31 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u32", + "name": "vsetq_lane_f16", "arguments": [ - "uint32x2_t a", - "const int n" + "float16_t a", + "float16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "VnH" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8H" } }, "Architectures": [ @@ -96449,27 +300052,31 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u64", + "name": "vsetq_lane_f32", "arguments": [ - "uint64x1_t a", - "const int n" + "float32_t a", + "float32x4_t v", + "const int lane" ], "return_type": { - "value": "uint64x1_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4S" } }, 
"Architectures": [ @@ -96479,305 +300086,334 @@ ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshr_n_u8", + "name": "vsetq_lane_f64", "arguments": [ - "uint8x8_t a", - "const int n" + "float64_t a", + "float64x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrd_n_s64", + "name": "vsetq_lane_p16", "arguments": [ - "int64_t a", - "const int n" + "poly16_t a", + "poly16x8_t v", + "const int lane" ], "return_type": { - "value": "int64_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 7 + }, + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrd_n_u64", + "name": "vsetq_lane_p64", "arguments": [ - "uint64_t a", - "const int n" + "poly64_t a", + "poly64x2_t v", + "const int lane" ], "return_type": { - "value": "uint64_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 64 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "USHR" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s16", + "name": "vsetq_lane_p8", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "const int n" + "poly8_t a", + "poly8x16_t v", + "const int lane" ], "return_type": { - "value": "int8x16_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - 
"register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 15 }, - "r": { - "register": "Vd.8B" + "v": { + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s32", + "name": "vsetq_lane_s16", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "const int n" + "int16_t a", + "int16x8_t v", + "const int lane" ], "return_type": { "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 7 }, - "r": { - "register": "Vd.4H" + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_s64", + "name": "vsetq_lane_s32", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "const int n" + "int32_t a", + "int32x4_t v", + "const int lane" ], "return_type": { "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 3 }, - "r": { - "register": "Vd.2S" + "v": { + "register": "Vd.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u16", + "name": "vsetq_lane_s64", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "const int n" + "int64_t a", + "int64x2_t v", + "const int lane" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 1 }, - "r": { - "register": "Vd.8B" + "v": { + "register": "Vd.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, 
{ "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u32", + "name": "vsetq_lane_s8", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "const int n" + "int8_t a", + "int8x16_t v", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 15 }, - "r": { - "register": "Vd.4H" + "v": { + "register": "Vd.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_high_n_u64", + "name": "vsetq_lane_u16", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "const int n" + "uint16_t a", + "uint16x8_t v", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 7 }, - "r": { - "register": "Vd.2S" + "v": { + "register": "Vd.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SHRN2" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s16", + "name": "vsetq_lane_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32_t a", + "uint32x4_t v", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 8 + "lane": { + "minimum": 0, + "maximum": 3 + }, + "v": { + "register": "Vd.4S" } }, "Architectures": [ @@ -96787,27 +300423,31 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s32", + "name": "vsetq_lane_u64", "arguments": [ - "int32x4_t a", - "const int n" + "uint64_t a", + "uint64x2_t v", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { 
"a": { - "register": "Vn.4S" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 16 + "lane": { + "minimum": 0, + "maximum": 1 + }, + "v": { + "register": "Vd.2D" } }, "Architectures": [ @@ -96817,27 +300457,31 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_s64", + "name": "vsetq_lane_u8", "arguments": [ - "int64x2_t a", - "const int n" + "uint8_t a", + "uint8x16_t v", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Rn" }, - "n": { - "minimum": 1, - "maximum": 32 + "lane": { + "minimum": 0, + "maximum": 15 + }, + "v": { + "register": "Vd.16B" } }, "Architectures": [ @@ -96847,447 +300491,439 @@ ], "instructions": [ [ - "SHRN" + "MOV" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u16", + "name": "vsha1cq_u32", "arguments": [ - "uint16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], "return_type": { - "value": "uint8x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_e": { + "register": "Sn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1C" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u32", + "name": "vsha1h_u32", "arguments": [ - "uint32x4_t a", - "const int n" + "uint32_t hash_e" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_e": { + "register": "Sn" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrn_n_u64", + "name": "vsha1mq_u32", "arguments": [ - "uint64x2_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], 
"return_type": { - "value": "uint32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 32 + "hash_e": { + "register": "Sn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SHRN" + "SHA1M" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s16", + "name": "vsha1pq_u32", "arguments": [ - "int16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32_t hash_e", + "uint32x4_t wk" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_e": { + "register": "Sn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1P" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s32", + "name": "vsha1su0q_u32", "arguments": [ - "int32x4_t a", - "const int n" + "uint32x4_t w0_3", + "uint32x4_t w4_7", + "uint32x4_t w8_11" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { + "w0_3": { + "register": "Vd.4S" + }, + "w4_7": { "register": "Vn.4S" }, - "n": { - "minimum": 1, - "maximum": 32 + "w8_11": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_s64", + "name": "vsha1su1q_u32", "arguments": [ - "int64x2_t a", - "const int n" + "uint32x4_t tw0_3", + "uint32x4_t w12_15" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "tw0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 64 + "w12_15": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA1SU1" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vshrq_n_s8", + "name": "vsha256h2q_u32", "arguments": [ - "int8x16_t a", - "const int n" + "uint32x4_t hash_efgh", + "uint32x4_t hash_abcd", + "uint32x4_t wk" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "hash_abcd": { + "register": "Qn" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_efgh": { + "register": "Qd" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SSHR" + "SHA256H2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u16", + "name": "vsha256hq_u32", "arguments": [ - "uint16x8_t a", - "const int n" + "uint32x4_t hash_abcd", + "uint32x4_t hash_efgh", + "uint32x4_t wk" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "hash_abcd": { + "register": "Qd" }, - "n": { - "minimum": 1, - "maximum": 16 + "hash_efgh": { + "register": "Qn" + }, + "wk": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u32", + "name": "vsha256su0q_u32", "arguments": [ - "uint32x4_t a", - "const int n" + "uint32x4_t w0_3", + "uint32x4_t w4_7" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "w0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 32 + "w4_7": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u64", + "name": "vsha256su1q_u32", "arguments": [ - "uint64x2_t a", - "const int n" + "uint32x4_t tw0_3", + "uint32x4_t w8_11", + "uint32x4_t w12_15" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "tw0_3": { + "register": "Vd.4S" }, - "n": { - "minimum": 1, - "maximum": 64 + 
"w12_15": { + "register": "Vm.4S" + }, + "w8_11": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA256SU1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vshrq_n_u8", + "name": "vsha512h2q_u64", "arguments": [ - "uint8x16_t a", - "const int n" + "uint64x2_t sum_ab", + "uint64x2_t hash_c_", + "uint64x2_t hash_ab" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "hash_ab": { + "register": "Vm.2D" }, - "n": { - "minimum": 1, - "maximum": 8 + "hash_c_": { + "register": "Qn" + }, + "sum_ab": { + "register": "Qd" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USHR" + "SHA512H2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p16", + "name": "vsha512hq_u64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", - "const int n" + "uint64x2_t hash_ed", + "uint64x2_t hash_gf", + "uint64x2_t kwh_kwh2" ], "return_type": { - "value": "poly16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.4H" + "hash_ed": { + "register": "Qd" }, - "b": { - "register": "Vn.4H" + "hash_gf": { + "register": "Qn" }, - "n": { - "minimum": 0, - "maximum": 15 + "kwh_kwh2": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512H" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p64", + "name": "vsha512su0q_u64", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", - "const int n" + "uint64x2_t w0_1", + "uint64x2_t w2_" ], "return_type": { - "value": "poly64x1_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "w0_1": { + "register": "Vd.2D" }, - "n": { - "minimum": 0, - "maximum": 63 + "w2_": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512SU0" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_p8", + "name": "vsha512su1q_u64", "arguments": [ 
- "poly8x8_t a", - "poly8x8_t b", - "const int n" + "uint64x2_t s01_s02", + "uint64x2_t w14_15", + "uint64x2_t w9_10" ], "return_type": { - "value": "poly8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "a": { - "register": "Vd.8B" + "s01_s02": { + "register": "Vd.2D" }, - "b": { - "register": "Vn.8B" + "w14_15": { + "register": "Vn.2D" }, - "n": { - "minimum": 0, - "maximum": 7 + "w9_10": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHA512SU1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s16", + "name": "vshl_n_s16", "arguments": [ "int16x4_t a", - "int16x4_t b", "const int n" ], "return_type": { @@ -97295,9 +300931,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -97312,16 +300945,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s32", + "name": "vshl_n_s32", "arguments": [ "int32x2_t a", - "int32x2_t b", "const int n" ], "return_type": { @@ -97329,9 +300961,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -97346,16 +300975,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s64", + "name": "vshl_n_s64", "arguments": [ "int64x1_t a", - "int64x1_t b", "const int n" ], "return_type": { @@ -97363,9 +300991,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -97380,16 +301005,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_s8", + "name": "vshl_n_s8", "arguments": [ "int8x8_t a", - "int8x8_t b", "const int n" ], "return_type": { @@ -97397,9 +301021,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -97414,16 +301035,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u16", + "name": "vshl_n_u16", 
"arguments": [ "uint16x4_t a", - "uint16x4_t b", "const int n" ], "return_type": { @@ -97431,9 +301051,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -97448,16 +301065,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u32", + "name": "vshl_n_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b", "const int n" ], "return_type": { @@ -97465,9 +301081,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -97482,16 +301095,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u64", + "name": "vshl_n_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b", "const int n" ], "return_type": { @@ -97499,9 +301111,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -97516,16 +301125,15 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsli_n_u8", + "name": "vshl_n_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b", "const int n" ], "return_type": { @@ -97533,9 +301141,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -97550,95 +301155,84 @@ ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vslid_n_s64", + "name": "vshl_s16", "arguments": [ - "int64_t a", - "int64_t b", - "const int n" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "int64_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.4H" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vslid_n_u64", + "name": "vshl_s32", "arguments": [ - "uint64_t a", - "uint64_t b", - "const int n" + "int32x2_t a", + "int32x2_t b" ], 
"return_type": { - "value": "uint64_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2S" }, "b": { - "register": "Dn" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p16", + "name": "vshl_s64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b", - "const int n" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dn" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 0, - "maximum": 15 + "register": "Dm" } }, "Architectures": [ @@ -97648,64 +301242,55 @@ ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p64", + "name": "vshl_s8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", - "const int n" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_p8", + "name": "vshl_u16", "arguments": [ - "poly8x16_t a", - "poly8x16_t b", - "const int n" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.4H" }, "b": { - "register": "Vn.16B" - }, - "n": { - "minimum": 0, - "maximum": 7 + "register": "Vm.4H" } }, "Architectures": [ @@ -97715,31 +301300,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s16", + "name": "vshl_u32", "arguments": [ - "int16x8_t a", - "int16x8_t b", - "const int n" + 
"uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.2S" }, "b": { - "register": "Vn.8H" - }, - "n": { - "minimum": 0, - "maximum": 15 + "register": "Vm.2S" } }, "Architectures": [ @@ -97749,31 +301329,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s32", + "name": "vshl_u64", "arguments": [ - "int32x4_t a", - "int32x4_t b", - "const int n" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 0, - "maximum": 31 + "register": "Dm" } }, "Architectures": [ @@ -97783,31 +301358,26 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s64", + "name": "vshl_u8", "arguments": [ - "int64x2_t a", - "int64x2_t b", - "const int n" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8B" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Vm.8B" } }, "Architectures": [ @@ -97817,282 +301387,221 @@ ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_s8", + "name": "vshld_n_s64", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "int64_t a", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Dn" }, "n": { "minimum": 0, - "maximum": 7 + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u16", + "name": "vshld_n_u64", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + 
"uint64_t a", "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "register": "Dn" }, "n": { "minimum": 0, - "maximum": 15 + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u32", + "name": "vshld_s64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "const int n" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dn" }, "b": { - "register": "Vn.4S" - }, - "n": { - "minimum": 0, - "maximum": 31 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u64", + "name": "vshld_u64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int n" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Dn" }, "b": { - "register": "Vn.2D" - }, - "n": { - "minimum": 0, - "maximum": 63 + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SLI" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsliq_n_u8", + "name": "vshll_high_n_s16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.8H" }, "n": { "minimum": 0, - "maximum": 7 + "maximum": 16 } }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "SLI" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vsm3partw1q_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" - ], - 
"return_type": { - "value": "uint32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vd.4S" - }, - "b": {}, - "c": {} - }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3PARTW1" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3partw2q_u32", + "name": "vshll_high_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4S" }, - "b": {}, - "c": {} + "n": { + "minimum": 0, + "maximum": 32 + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3PARTW2" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3ss1q_u32", + "name": "vshll_high_n_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.16B" }, - "b": {}, - "c": {} + "n": { + "minimum": 0, + "maximum": 8 + } }, "Architectures": [ "A64" ], "instructions": [ [ - "SM3SS1" + "SSHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt1aq_u32", + "name": "vshll_high_n_u16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "uint16x8_t a", + "const int n" ], "return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8H" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 16 } }, "Architectures": [ @@ -98100,31 +301609,27 @@ ], "instructions": [ [ - "SM3TT1A" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt1bq_u32", + "name": "vshll_high_n_u32", "arguments": [ "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": 
"Vd.4S" + "register": "Vn.4S" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 32 } }, "Architectures": [ @@ -98132,31 +301637,27 @@ ], "instructions": [ [ - "SM3TT1B" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt2aq_u32", + "name": "vshll_high_n_u8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "uint8x16_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.16B" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 8 } }, "Architectures": [ @@ -98164,584 +301665,667 @@ ], "instructions": [ [ - "SM3TT2A" + "USHLL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm3tt2bq_u32", + "name": "vshll_n_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", - "uint32x4_t c", - "const int imm2" + "int16x4_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.4H" }, - "b": {}, - "c": {}, - "imm2": { + "n": { "minimum": 0, - "maximum": 3 + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SM3TT2B" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm4ekeyq_u32", + "name": "vshll_n_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x2_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, - "b": {} + "n": { + "minimum": 0, + "maximum": 32 + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SM4EKEY" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsm4eq_u32", - "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "name": "vshll_n_s8", + "arguments": [ + "int8x8_t a", + "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": 
"int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.8B" }, - "b": {} + "n": { + "minimum": 0, + "maximum": 8 + } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SM4E" + "SSHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u16", + "name": "vshll_n_u16", "arguments": [ "uint16x4_t a", - "int16x4_t b" + "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" + }, + "n": { + "minimum": 0, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u32", + "name": "vshll_n_u32", "arguments": [ "uint32x2_t a", - "int32x2_t b" + "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" + }, + "n": { + "minimum": 0, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u64", + "name": "vshll_n_u8", "arguments": [ - "uint64x1_t a", - "int64x1_t b" + "uint8x8_t a", + "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, - "b": { - "register": "Dn" + "n": { + "minimum": 0, + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "USHLL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadd_u8", + "name": "vshlq_n_s16", "arguments": [ - "uint8x8_t a", - "int8x8_t b" + "int16x8_t a", + "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.8H" }, - "b": { - "register": "Vn.8B" + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddb_u8", + "name": "vshlq_n_s32", "arguments": [ - "uint8_t a", - "int8_t b" + "int32x4_t a", + "const int n" ], "return_type": { - "value": "uint8_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Bd" + "register": "Vn.4S" }, - "b": { - "register": "Bn" + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddd_u64", + "name": "vshlq_n_s64", "arguments": [ - "uint64_t a", - "int64_t b" + "int64x2_t a", + "const int n" ], "return_type": { - "value": "uint64_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.2D" }, - "b": { - "register": "Dn" + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddh_u16", + "name": "vshlq_n_s8", "arguments": [ - "uint16_t a", - "int16_t b" + "int8x16_t a", + "const int n" ], "return_type": { - "value": "uint16_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.16B" }, - "b": { - "register": "Hn" + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u16", + "name": "vshlq_n_u16", "arguments": [ "uint16x8_t a", - "int16x8_t b" + "const int n" ], "return_type": { "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u32", + "name": "vshlq_n_u32", "arguments": [ "uint32x4_t a", - "int32x4_t b" + "const int n" ], 
"return_type": { "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u64", + "name": "vshlq_n_u64", "arguments": [ "uint64x2_t a", - "int64x2_t b" + "const int n" ], "return_type": { "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqaddq_u8", + "name": "vshlq_n_u8", "arguments": [ "uint8x16_t a", - "int8x16_t b" + "const int n" ], "return_type": { "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqadds_u32", + "name": "vshlq_s16", "arguments": [ - "uint32_t a", - "int32_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.8H" }, "b": { - "register": "Sn" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USQADD" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f16", + "name": "vshlq_s32", "arguments": [ - "float16x4_t a" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f32", + 
"name": "vshlq_s64", "arguments": [ - "float32x2_t a" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrt_f64", + "name": "vshlq_s8", "arguments": [ - "float64x1_t a" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float64x1_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "SSHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrth_f16", + "name": "vshlq_u16", "arguments": [ - "float16_t a" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hn" + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f16", + "name": "vshlq_u32", "arguments": [ - "float16x8_t a" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f32", + "name": "vshlq_u64", "arguments": [ - "float32x4_t a" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ 
- "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsqrtq_f64", + "name": "vshlq_u8", "arguments": [ - "float64x2_t a" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float64x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSQRT" + "USHL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s16", + "name": "vshr_n_s16", "arguments": [ "int16x4_t a", - "int16x4_t b", "const int n" ], "return_type": { @@ -98749,9 +302333,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -98766,16 +302347,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s32", + "name": "vshr_n_s32", "arguments": [ "int32x2_t a", - "int32x2_t b", "const int n" ], "return_type": { @@ -98783,9 +302363,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -98800,16 +302377,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s64", + "name": "vshr_n_s64", "arguments": [ "int64x1_t a", - "int64x1_t b", "const int n" ], "return_type": { @@ -98817,9 +302393,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -98834,16 +302407,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_s8", + "name": "vshr_n_s8", "arguments": [ "int8x8_t a", - "int8x8_t b", "const int n" ], "return_type": { @@ -98851,9 +302423,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -98868,16 +302437,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u16", + "name": "vshr_n_u16", "arguments": [ "uint16x4_t a", - "uint16x4_t b", "const int n" ], 
"return_type": { @@ -98885,9 +302453,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { "register": "Vn.4H" }, "n": { @@ -98902,16 +302467,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u32", + "name": "vshr_n_u32", "arguments": [ "uint32x2_t a", - "uint32x2_t b", "const int n" ], "return_type": { @@ -98919,9 +302483,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { "register": "Vn.2S" }, "n": { @@ -98936,16 +302497,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u64", + "name": "vshr_n_u64", "arguments": [ "uint64x1_t a", - "uint64x1_t b", "const int n" ], "return_type": { @@ -98953,9 +302513,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -98970,16 +302527,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsra_n_u8", + "name": "vshr_n_u8", "arguments": [ "uint8x8_t a", - "uint8x8_t b", "const int n" ], "return_type": { @@ -98987,9 +302543,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { "register": "Vn.8B" }, "n": { @@ -99004,16 +302557,15 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrad_n_s64", + "name": "vshrd_n_s64", "arguments": [ "int64_t a", - "int64_t b", "const int n" ], "return_type": { @@ -99021,9 +302573,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -99036,16 +302585,15 @@ ], "instructions": [ [ - "SSRA" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrad_n_u64", + "name": "vshrd_n_u64", "arguments": [ "uint64_t a", - "uint64_t b", "const int n" ], "return_type": { @@ -99053,9 +302601,6 @@ }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { "register": "Dn" }, "n": { @@ -99068,152 +302613,144 @@ ], "instructions": [ [ - "USRA" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vsraq_n_s16", + "name": "vshrn_high_n_s16", "arguments": [ + "int8x8_t r", "int16x8_t a", - "int16x8_t b", "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s32", + "name": "vshrn_high_n_s32", "arguments": [ + "int16x4_t r", "int32x4_t a", - "int32x4_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s64", + "name": "vshrn_high_n_s64", "arguments": [ + "int32x2_t r", "int64x2_t a", - "int64x2_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_s8", + "name": "vshrn_high_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "uint8x8_t r", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.8H" }, "n": { "minimum": 1, "maximum": 8 + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSRA" + 
"SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u16", + "name": "vshrn_high_n_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + "uint16x4_t r", + "uint32x4_t a", "const int n" ], "return_type": { @@ -99221,33 +302758,31 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" - }, - "b": { - "register": "Vn.8H" + "register": "Vn.4S" }, "n": { "minimum": 1, "maximum": 16 + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u32", + "name": "vshrn_high_n_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", + "uint32x2_t r", + "uint64x2_t a", "const int n" ], "return_type": { @@ -99255,48 +302790,42 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" - }, - "b": { - "register": "Vn.4S" + "register": "Vn.2D" }, "n": { "minimum": 1, "maximum": 32 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USRA" + "SHRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u64", + "name": "vshrn_n_s16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" - }, - "b": { - "register": "Vn.2D" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ @@ -99306,31 +302835,27 @@ ], "instructions": [ [ - "USRA" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsraq_n_u8", + "name": "vshrn_n_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int32x4_t a", "const int n" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" - }, - "b": { - "register": "Vn.16B" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 16 } }, "Architectures": [ @@ -99340,31 +302865,27 @@ ], "instructions": [ [ - 
"USRA" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p16", + "name": "vshrn_n_s64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b", + "int64x2_t a", "const int n" ], "return_type": { - "value": "poly16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 32 } }, "Architectures": [ @@ -99374,64 +302895,57 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p64", + "name": "vshrn_n_u16", "arguments": [ - "poly64x1_t a", - "poly64x1_t b", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "poly64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_p8", + "name": "vshrn_n_u32", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", + "uint32x4_t a", "const int n" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 16 } }, "Architectures": [ @@ -99441,31 +302955,27 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s16", + "name": "vshrn_n_u64", "arguments": [ - "int16x4_t a", - "int16x4_t b", + "uint64x2_t a", "const int n" ], "return_type": { - "value": "int16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 32 } }, "Architectures": [ @@ -99475,31 +302985,27 @@ ], "instructions": [ [ - "SRI" + "SHRN" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vsri_n_s32", + "name": "vshrq_n_s16", "arguments": [ - "int32x2_t a", - "int32x2_t b", + "int16x8_t a", "const int n" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 } }, "Architectures": [ @@ -99509,31 +303015,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s64", + "name": "vshrq_n_s32", "arguments": [ - "int64x1_t a", - "int64x1_t b", + "int32x4_t a", "const int n" ], "return_type": { - "value": "int64x1_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 32 } }, "Architectures": [ @@ -99543,31 +303045,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_s8", + "name": "vshrq_n_s64", "arguments": [ - "int8x8_t a", - "int8x8_t b", + "int64x2_t a", "const int n" ], "return_type": { - "value": "int8x8_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 64 } }, "Architectures": [ @@ -99577,31 +303075,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u16", + "name": "vshrq_n_s8", "arguments": [ - "uint16x4_t a", - "uint16x4_t b", + "int8x16_t a", "const int n" ], "return_type": { - "value": "uint16x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" - }, - "b": { - "register": "Vn.4H" + "register": "Vn.16B" }, "n": { "minimum": 1, - "maximum": 16 + "maximum": 8 } }, "Architectures": [ @@ -99611,31 +303105,27 @@ ], "instructions": [ [ - "SRI" + "SSHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u32", + "name": "vshrq_n_u16", 
"arguments": [ - "uint32x2_t a", - "uint32x2_t b", + "uint16x8_t a", "const int n" ], "return_type": { - "value": "uint32x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" - }, - "b": { - "register": "Vn.2S" + "register": "Vn.8H" }, "n": { "minimum": 1, - "maximum": 32 + "maximum": 16 } }, "Architectures": [ @@ -99645,31 +303135,27 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u64", + "name": "vshrq_n_u32", "arguments": [ - "uint64x1_t a", - "uint64x1_t b", + "uint32x4_t a", "const int n" ], "return_type": { - "value": "uint64x1_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.4S" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 32 } }, "Architectures": [ @@ -99679,31 +303165,27 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsri_n_u8", + "name": "vshrq_n_u64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", + "uint64x2_t a", "const int n" ], "return_type": { - "value": "uint8x8_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" - }, - "b": { - "register": "Vn.8B" + "register": "Vn.2D" }, "n": { "minimum": 1, - "maximum": 8 + "maximum": 64 } }, "Architectures": [ @@ -99713,162 +303195,162 @@ ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrid_n_s64", + "name": "vshrq_n_u8", "arguments": [ - "int64_t a", - "int64_t b", + "uint8x16_t a", "const int n" ], "return_type": { - "value": "int64_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" - }, - "b": { - "register": "Dn" + "register": "Vn.16B" }, "n": { "minimum": 1, - "maximum": 64 + "maximum": 8 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRI" + "USHR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsrid_n_u64", + "name": "vsli_n_p16", "arguments": [ - "uint64_t a", - "uint64_t b", + 
"poly16x4_t a", + "poly16x4_t b", "const int n" ], "return_type": { - "value": "uint64_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vd.4H" }, "b": { - "register": "Dn" + "register": "Vn.4H" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p16", + "name": "vsli_n_p64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b", + "poly64x1_t a", + "poly64x1_t b", "const int n" ], "return_type": { - "value": "poly16x8_t" + "value": "poly64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Dd" }, "b": { - "register": "Vn.8H" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p64", + "name": "vsli_n_p8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b", + "poly8x8_t a", + "poly8x8_t b", "const int n" ], "return_type": { - "value": "poly64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": "Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_p8", + "name": "vsli_n_s16", "arguments": [ - "poly8x16_t a", - "poly8x16_t b", + "int16x4_t a", + "int16x4_t b", "const int n" ], "return_type": { - "value": "poly8x16_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" }, "b": { - "register": "Vn.16B" + "register": "Vn.4H" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -99878,31 +303360,31 @@ ], "instructions": [ [ 
- "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s16", + "name": "vsli_n_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b", + "int32x2_t a", + "int32x2_t b", "const int n" ], "return_type": { - "value": "int16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" }, "b": { - "register": "Vn.8H" + "register": "Vn.2S" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -99912,31 +303394,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s32", + "name": "vsli_n_s64", "arguments": [ - "int32x4_t a", - "int32x4_t b", + "int64x1_t a", + "int64x1_t b", "const int n" ], "return_type": { - "value": "int32x4_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dd" }, "b": { - "register": "Vn.4S" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 32 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -99946,31 +303428,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s64", + "name": "vsli_n_s8", "arguments": [ - "int64x2_t a", - "int64x2_t b", + "int8x8_t a", + "int8x8_t b", "const int n" ], "return_type": { - "value": "int64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": "Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -99980,31 +303462,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_s8", + "name": "vsli_n_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b", + "uint16x4_t a", + "uint16x4_t b", "const int n" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vd.4H" }, "b": { - "register": "Vn.16B" + "register": 
"Vn.4H" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -100014,31 +303496,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u16", + "name": "vsli_n_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b", + "uint32x2_t a", + "uint32x2_t b", "const int n" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vd.2S" }, "b": { - "register": "Vn.8H" + "register": "Vn.2S" }, "n": { - "minimum": 1, - "maximum": 16 + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -100048,31 +303530,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u32", + "name": "vsli_n_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b", + "uint64x1_t a", + "uint64x1_t b", "const int n" ], "return_type": { - "value": "uint32x4_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Dd" }, "b": { - "register": "Vn.4S" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 32 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -100082,31 +303564,31 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u64", + "name": "vsli_n_u8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", + "uint8x8_t a", + "uint8x8_t b", "const int n" ], "return_type": { - "value": "uint64x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vd.8B" }, "b": { - "register": "Vn.2D" + "register": "Vn.8B" }, "n": { - "minimum": 1, - "maximum": 64 + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100116,89 +303598,95 @@ ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsriq_n_u8", + "name": "vslid_n_s64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", + "int64_t a", + "int64_t b", "const int n" ], "return_type": { - 
"value": "uint8x16_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Dd" }, "b": { - "register": "Vn.16B" + "register": "Dn" }, "n": { - "minimum": 1, - "maximum": 8 + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SRI" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16", + "name": "vslid_n_u64", "arguments": [ - "float16_t * ptr", - "float16x4_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x2", + "name": "vsliq_n_p16", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val" + "poly16x8_t a", + "poly16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -100208,55 +303696,64 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x3", + "name": "vsliq_n_p64", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val" + "poly64x2_t a", + "poly64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f16_x4", + 
"name": "vsliq_n_p8", "arguments": [ - "float16_t * ptr", - "float16x4x4_t val" + "poly8x16_t a", + "poly8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100266,26 +303763,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32", + "name": "vsliq_n_s16", "arguments": [ - "float32_t * ptr", - "float32x2_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ @@ -100295,26 +303797,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x2", + "name": "vsliq_n_s32", "arguments": [ - "float32_t * ptr", - "float32x2x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.2S" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ @@ -100324,26 +303831,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x3", + "name": "vsliq_n_s64", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.2D" + }, + "n": { + 
"minimum": 0, + "maximum": 63 } }, "Architectures": [ @@ -100353,26 +303865,31 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f32_x4", + "name": "vsliq_n_s8", "arguments": [ - "float32_t * ptr", - "float32x2x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ @@ -100382,207 +303899,228 @@ ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64", + "name": "vsliq_n_u16", "arguments": [ - "float64_t * ptr", - "float64x1_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 0, + "maximum": 15 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x2", + "name": "vsliq_n_u32", "arguments": [ - "float64_t * ptr", - "float64x1x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 0, + "maximum": 31 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x3", + "name": "vsliq_n_u64", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, 
"Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 0, + "maximum": 63 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_f64_x4", + "name": "vsliq_n_u8", "arguments": [ - "float64_t * ptr", - "float64x1x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 0, + "maximum": 7 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SLI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f16", + "name": "vsm3partw1q_u32", "arguments": [ - "float16_t * ptr", - "float16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3PARTW1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f32", + "name": "vsm3partw2q_u32", "arguments": [ - "float32_t * ptr", - "float32x2_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.2S" + "c": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], 
"instructions": [ [ - "ST1" + "SM3PARTW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_f64", + "name": "vsm3ss1q_u32", "arguments": [ - "float64_t * ptr", - "float64x1_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vm.4S" }, - "val": { - "register": "Vt.1D" + "c": { + "register": "Va.4S" } }, "Architectures": [ @@ -100590,801 +304128,715 @@ ], "instructions": [ [ - "ST1" + "SM3SS1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_p16", + "name": "vsm3tt1aq_u32", "arguments": [ - "poly16_t * ptr", - "poly16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT1A" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_p64", + "name": "vsm3tt1bq_u32", "arguments": [ - "poly64_t * ptr", - "poly64x1_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.1D" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT1B" ] ] }, { "SIMD_ISA": "Neon", - 
"name": "vst1_lane_p8", + "name": "vsm3tt2aq_u32", "arguments": [ - "poly8_t * ptr", - "poly8x8_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.8B" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT2A" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s16", + "name": "vsm3tt2bq_u32", "arguments": [ - "int16_t * ptr", - "int16x4_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b", + "uint32x4_t c", + "const int imm2" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vd.4S" }, - "ptr": { - "register": "Xn" + "b": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.4H" + "c": { + "register": "Vm.4S" + }, + "imm2": { + "minimum": 0, + "maximum": 3 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM3TT2B" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s32", + "name": "vsm4ekeyq_u32", "arguments": [ - "int32_t * ptr", - "int32x2_t val", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vn.4S" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM4EKEY" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s64", + "name": "vsm4eq_u32", "arguments": [ - "int64_t * ptr", - "int64x1_t val", - "const int lane" + 
"uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SM4E" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_s8", + "name": "vsqadd_u16", "arguments": [ - "int8_t * ptr", - "int8x8_t val", - "const int lane" + "uint16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u16", + "name": "vsqadd_u32", "arguments": [ - "uint16_t * ptr", - "uint16x4_t val", - "const int lane" + "uint32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u32", + "name": "vsqadd_u64", "arguments": [ - "uint32_t * ptr", - "uint32x2_t val", - "const int lane" + "uint64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - 
"ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u64", + "name": "vsqadd_u8", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val", - "const int lane" + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_lane_u8", + "name": "vsqaddb_u8", "arguments": [ - "uint8_t * ptr", - "uint8x8_t val", - "const int lane" + "uint8_t a", + "int8_t b" ], "return_type": { - "value": "void" + "value": "uint8_t" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, - "ptr": { - "register": "Xn" + "a": { + "register": "Bd" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Bn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16", + "name": "vsqaddd_u64", "arguments": [ - "poly16_t * ptr", - "poly16x4_t val" + "uint64_t a", + "int64_t b" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x2", + "name": "vsqaddh_u16", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val" + "uint16_t a", + "int16_t b" ], "return_type": { - "value": "void" + "value": "uint16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Hd" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Hn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - 
"ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x3", + "name": "vsqaddq_u16", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val" + "uint16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p16_x4", + "name": "vsqaddq_u32", "arguments": [ - "poly16_t * ptr", - "poly16x4x4_t val" + "uint32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64", + "name": "vsqaddq_u64", "arguments": [ - "poly64_t * ptr", - "poly64x1_t val" + "uint64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x2", + "name": "vsqaddq_u8", "arguments": [ - "poly64_t * ptr", - "poly64x1x2_t val" + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x3", + "name": "vsqadds_u32", 
"arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val" + "uint32_t a", + "int32_t b" ], "return_type": { - "value": "void" + "value": "uint32_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Sd" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Sn" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "USQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p64_x4", + "name": "vsqrt_f16", "arguments": [ - "poly64_t * ptr", - "poly64x1x4_t val" + "float16x4_t a" ], "return_type": { - "value": "void" + "value": "float16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt4.1D" + "a": { + "register": "Vn.4H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8", + "name": "vsqrt_f32", "arguments": [ - "poly8_t * ptr", - "poly8x8_t val" + "float32x2_t a" ], "return_type": { - "value": "void" + "value": "float32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.8B" + "a": { + "register": "Vn.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8_x2", + "name": "vsqrt_f64", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val" + "float64x1_t a" ], "return_type": { - "value": "void" + "value": "float64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt2.8B" + "a": { + "register": "Dn" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8_x3", + "name": "vsqrth_f16", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val" + "float16_t a" ], "return_type": { - "value": "void" + "value": "float16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt3.8B" + "a": { + "register": "Hn" } }, 
"Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_p8_x4", + "name": "vsqrtq_f16", "arguments": [ - "poly8_t * ptr", - "poly8x8x4_t val" + "float16x8_t a" ], "return_type": { - "value": "void" + "value": "float16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt4.8B" + "a": { + "register": "Vn.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16", + "name": "vsqrtq_f32", "arguments": [ - "int16_t * ptr", - "int16x4_t val" + "float32x4_t a" ], "return_type": { - "value": "void" + "value": "float32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.4H" + "a": { + "register": "Vn.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x2", + "name": "vsqrtq_f64", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val" + "float64x2_t a" ], "return_type": { - "value": "void" + "value": "float64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt2.4H" + "a": { + "register": "Vn.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "FSQRT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x3", + "name": "vsra_n_s16", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val" + "int16x4_t a", + "int16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101394,26 +304846,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s16_x4", + "name": "vsra_n_s32", "arguments": [ 
- "int16_t * ptr", - "int16x4x4_t val" + "int32x2_t a", + "int32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101423,26 +304880,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32", + "name": "vsra_n_s64", "arguments": [ - "int32_t * ptr", - "int32x2_t val" + "int64x1_t a", + "int64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101452,26 +304914,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x2", + "name": "vsra_n_s8", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val" + "int8x8_t a", + "int8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt2.2S" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101481,26 +304948,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x3", + "name": "vsra_n_u16", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val" + "uint16x4_t a", + "uint16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101510,26 
+304982,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s32_x4", + "name": "vsra_n_u32", "arguments": [ - "int32_t * ptr", - "int32x2x4_t val" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101539,26 +305016,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64", + "name": "vsra_n_u64", "arguments": [ - "int64_t * ptr", - "int64x1_t val" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, - "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "Arguments_Preparation": { + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101568,26 +305050,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x2", + "name": "vsra_n_u8", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val" + "uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101597,84 +305084,95 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x3", + "name": "vsrad_n_s64", "arguments": [ - "int64_t * ptr", - "int64x1x3_t val" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + 
"a": { + "register": "Dd" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s64_x4", + "name": "vsrad_n_u64", "arguments": [ - "int64_t * ptr", - "int64x1x4_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8", + "name": "vsraq_n_s16", "arguments": [ - "int8_t * ptr", - "int8x8_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101684,26 +305182,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8_x2", + "name": "vsraq_n_s32", "arguments": [ - "int8_t * ptr", - "int8x8x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.8B" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101713,26 +305216,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8_x3", + "name": "vsraq_n_s64", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int 
n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.8B" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101742,26 +305250,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_s8_x4", + "name": "vsraq_n_s8", "arguments": [ - "int8_t * ptr", - "int8x8x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.8B" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101771,26 +305284,31 @@ ], "instructions": [ [ - "ST1" + "SSRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16", + "name": "vsraq_n_u16", "arguments": [ - "uint16_t * ptr", - "uint16x4_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.4H" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101800,26 +305318,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16_x2", + "name": "vsraq_n_u32", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.4H" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -101829,26 +305352,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vst1_u16_x3", + "name": "vsraq_n_u64", "arguments": [ - "uint16_t * ptr", - "uint16x4x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -101858,26 +305386,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u16_x4", + "name": "vsraq_n_u8", "arguments": [ - "uint16_t * ptr", - "uint16x4x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101887,26 +305420,31 @@ ], "instructions": [ [ - "ST1" + "USRA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32", + "name": "vsri_n_p16", "arguments": [ - "uint32_t * ptr", - "uint32x2_t val" + "poly16x4_t a", + "poly16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -101916,55 +305454,64 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x2", + "name": "vsri_n_p64", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val" + "poly64x1_t a", + "poly64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.2S" + 
"b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x3", + "name": "vsri_n_p8", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val" + "poly8x8_t a", + "poly8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.2S" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -101974,26 +305521,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u32_x4", + "name": "vsri_n_s16", "arguments": [ - "uint32_t * ptr", - "uint32x2x4_t val" + "int16x4_t a", + "int16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt4.2S" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102003,26 +305555,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64", + "name": "vsri_n_s32", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val" + "int32x2_t a", + "int32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.1D" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102032,26 +305589,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x2", + "name": "vsri_n_s64", "arguments": [ - "uint64_t * ptr", - "uint64x1x2_t val" + "int64x1_t a", + "int64x1_t b", + "const int n" ], "return_type": { - "value": "void" + 
"value": "int64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.1D" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102061,26 +305623,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x3", + "name": "vsri_n_s8", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val" + "int8x8_t a", + "int8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.1D" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102090,26 +305657,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u64_x4", + "name": "vsri_n_u16", "arguments": [ - "uint64_t * ptr", - "uint64x1x4_t val" + "uint16x4_t a", + "uint16x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4H" }, - "val": { - "register": "Vt4.1D" + "b": { + "register": "Vn.4H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102119,26 +305691,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8", + "name": "vsri_n_u32", "arguments": [ - "uint8_t * ptr", - "uint8x8_t val" + "uint32x2_t a", + "uint32x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2S" }, - "val": { - "register": "Vt.8B" + "b": { + "register": "Vn.2S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102148,26 +305725,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x2", + "name": 
"vsri_n_u64", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val" + "uint64x1_t a", + "uint64x1_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x1_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt2.8B" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102177,26 +305759,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x3", + "name": "vsri_n_u8", "arguments": [ - "uint8_t * ptr", - "uint8x8x3_t val" + "uint8x8_t a", + "uint8x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8B" }, - "val": { - "register": "Vt3.8B" + "b": { + "register": "Vn.8B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102206,84 +305793,95 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1_u8_x4", + "name": "vsrid_n_s64", "arguments": [ - "uint8_t * ptr", - "uint8x8x4_t val" + "int64_t a", + "int64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt4.8B" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16", + "name": "vsrid_n_u64", "arguments": [ - "float16_t * ptr", - "float16x8_t val" + "uint64_t a", + "uint64_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Dd" }, - "val": { - "register": "Vt.8H" + "b": { + "register": "Dn" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - 
"v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x2", + "name": "vsriq_n_p16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val" + "poly16x8_t a", + "poly16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt2.8H" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102293,55 +305891,64 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x3", + "name": "vsriq_n_p64", "arguments": [ - "float16_t * ptr", - "float16x8x3_t val" + "poly64x2_t a", + "poly64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.8H" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f16_x4", + "name": "vsriq_n_p8", "arguments": [ - "float16_t * ptr", - "float16x8x4_t val" + "poly8x16_t a", + "poly8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "poly8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.8H" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102351,26 +305958,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32", + "name": "vsriq_n_s16", "arguments": [ - "float32_t * ptr", - "float32x4_t val" + "int16x8_t a", + "int16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": 
"Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.4S" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ @@ -102380,26 +305992,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x2", + "name": "vsriq_n_s32", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val" + "int32x4_t a", + "int32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.4S" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ @@ -102409,26 +306026,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x3", + "name": "vsriq_n_s64", "arguments": [ - "float32_t * ptr", - "float32x4x3_t val" + "int64x2_t a", + "int64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.4S" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ @@ -102438,26 +306060,31 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f32_x4", + "name": "vsriq_n_s8", "arguments": [ - "float32_t * ptr", - "float32x4x4_t val" + "int8x16_t a", + "int8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "int8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.4S" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ @@ -102467,139 +306094,162 @@ ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64", + "name": "vsriq_n_u16", "arguments": [ - "float64_t * ptr", - 
"float64x2_t val" + "uint16x8_t a", + "uint16x8_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint16x8_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.8H" }, - "val": { - "register": "Vt.2D" + "b": { + "register": "Vn.8H" + }, + "n": { + "minimum": 1, + "maximum": 16 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x2", + "name": "vsriq_n_u32", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val" + "uint32x4_t a", + "uint32x4_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint32x4_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.4S" }, - "val": { - "register": "Vt2.2D" + "b": { + "register": "Vn.4S" + }, + "n": { + "minimum": 1, + "maximum": 32 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x3", + "name": "vsriq_n_u64", "arguments": [ - "float64_t * ptr", - "float64x2x3_t val" + "uint64x2_t a", + "uint64x2_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint64x2_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.2D" }, - "val": { - "register": "Vt3.2D" + "b": { + "register": "Vn.2D" + }, + "n": { + "minimum": 1, + "maximum": 64 } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_f64_x4", + "name": "vsriq_n_u8", "arguments": [ - "float64_t * ptr", - "float64x2x4_t val" + "uint8x16_t a", + "uint8x16_t b", + "const int n" ], "return_type": { - "value": "void" + "value": "uint8x16_t" }, "Arguments_Preparation": { - "ptr": { - "register": "Xn" + "a": { + "register": "Vd.16B" }, - "val": { - "register": "Vt4.2D" + "b": { + "register": "Vn.16B" + }, + "n": { + "minimum": 1, + "maximum": 8 } }, "Architectures": [ + "v7", + 
"A32", "A64" ], "instructions": [ [ - "ST1" + "SRI" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f16", + "name": "vst1_f16", "arguments": [ "float16_t * ptr", - "float16x8_t val", - "const int lane" + "float16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -102615,25 +306265,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f32", + "name": "vst1_f16_x2", "arguments": [ - "float32_t * ptr", - "float32x4_t val", - "const int lane" + "float16_t * ptr", + "float16x4x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -102649,28 +306297,31 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_f64", + "name": "vst1_f16_x3", "arguments": [ - "float64_t * ptr", - "float64x2_t val", - "const int lane" + "float16_t * ptr", + "float16x4x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -102681,25 +306332,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p16", + "name": "vst1_f16_x4", "arguments": [ - "poly16_t * ptr", - "poly16x8_t val", - "const int lane" + "float16_t * ptr", + "float16x4x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.4H" + 
}, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -102715,28 +306370,24 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p64", + "name": "vst1_f32", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val", - "const int lane" + "float32_t * ptr", + "float32x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -102748,25 +306399,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_p8", + "name": "vst1_f32_x2", "arguments": [ - "poly8_t * ptr", - "poly8x16_t val", - "const int lane" + "float32_t * ptr", + "float32x2x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -102782,25 +306431,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s16", + "name": "vst1_f32_x3", "arguments": [ - "int16_t * ptr", - "int16x8_t val", - "const int lane" + "float32_t * ptr", + "float32x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -102816,25 +306466,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s32", + "name": "vst1_f32_x4", "arguments": [ - "int32_t * ptr", - "int32x4_t val", - "const int lane" + "float32_t * ptr", + "float32x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, 
- "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -102850,30 +306504,23 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s64", + "name": "vst1_f64", "arguments": [ - "int64_t * ptr", - "int64x2_t val", - "const int lane" + "float64_t * ptr", + "float64x1_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102884,30 +306531,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_s8", + "name": "vst1_f64_x2", "arguments": [ - "int8_t * ptr", - "int8x16_t val", - "const int lane" + "float64_t * ptr", + "float64x1x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102918,30 +306561,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u16", + "name": "vst1_f64_x3", "arguments": [ - "uint16_t * ptr", - "uint16x8_t val", - "const int lane" + "float64_t * ptr", + "float64x1x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102952,30 +306594,32 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u32", + 
"name": "vst1_f64_x4", "arguments": [ - "uint32_t * ptr", - "uint32x4_t val", - "const int lane" + "float64_t * ptr", + "float64x1x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -102986,10 +306630,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u64", + "name": "vst1_lane_f16", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val", + "float16_t * ptr", + "float16x4_t val", "const int lane" ], "return_type": { @@ -102998,13 +306642,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.4H" } }, "Architectures": [ @@ -103020,10 +306664,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_lane_u8", + "name": "vst1_lane_f32", "arguments": [ - "uint8_t * ptr", - "uint8x16_t val", + "float32_t * ptr", + "float32x2_t val", "const int lane" ], "return_type": { @@ -103032,13 +306676,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.16B" + "register": "Vt.2S" } }, "Architectures": [ @@ -103054,25 +306698,28 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16", + "name": "vst1_lane_f64", "arguments": [ - "poly16_t * ptr", - "poly16x8_t val" + "float64_t * ptr", + "float64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.8H" + "register": "Vt.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -103083,20 +306730,25 @@ }, { 
"SIMD_ISA": "Neon", - "name": "vst1q_p16_x2", + "name": "vst1_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x2_t val" + "poly16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -103112,24 +306764,28 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16_x3", + "name": "vst1_lane_p64", "arguments": [ - "poly16_t * ptr", - "poly16x8x3_t val" + "poly64_t * ptr", + "poly64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8H" + "register": "Vt.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103141,20 +306797,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p16_x4", + "name": "vst1_lane_p8", "arguments": [ - "poly16_t * ptr", - "poly16x8x4_t val" + "poly8_t * ptr", + "poly8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.8H" + "register": "Vt.8B" } }, "Architectures": [ @@ -103170,23 +306831,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64", + "name": "vst1_lane_s16", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val" + "int16_t * ptr", + "int16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.2D" + "register": "Vt.4H" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103198,23 +306865,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x2", + "name": "vst1_lane_s32", "arguments": [ - "poly64_t * ptr", - "poly64x2x2_t val" + "int32_t * ptr", + "int32x2_t val", + "const int lane" ], "return_type": { "value": 
"void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.2S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103226,20 +306899,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x3", + "name": "vst1_lane_s64", "arguments": [ - "poly64_t * ptr", - "poly64x2x3_t val" + "int64_t * ptr", + "int64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.2D" + "register": "Vt.1D" } }, "Architectures": [ @@ -103255,23 +306933,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p64_x4", + "name": "vst1_lane_s8", "arguments": [ - "poly64_t * ptr", - "poly64x2x4_t val" + "int8_t * ptr", + "int8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.2D" + "register": "Vt.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -103283,20 +306967,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8", + "name": "vst1_lane_u16", "arguments": [ - "poly8_t * ptr", - "poly8x16_t val" + "uint16_t * ptr", + "uint16x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -103312,20 +307001,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x2", + "name": "vst1_lane_u32", "arguments": [ - "poly8_t * ptr", - "poly8x16x2_t val" + "uint32_t * ptr", + "uint32x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.2S" } }, "Architectures": [ @@ 
-103341,20 +307035,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x3", + "name": "vst1_lane_u64", "arguments": [ - "poly8_t * ptr", - "poly8x16x3_t val" + "uint64_t * ptr", + "uint64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.16B" + "register": "Vt.1D" } }, "Architectures": [ @@ -103370,20 +307069,25 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_p8_x4", + "name": "vst1_lane_u8", "arguments": [ - "poly8_t * ptr", - "poly8x16x4_t val" + "uint8_t * ptr", + "uint8x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt4.16B" + "register": "Vt.8B" } }, "Architectures": [ @@ -103399,10 +307103,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16", + "name": "vst1_mf8_x4", "arguments": [ - "int16_t * ptr", - "int16x8_t val" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { "value": "void" @@ -103411,13 +307115,20 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -103428,10 +307139,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x2", + "name": "vst1_p16", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val" + "poly16_t * ptr", + "poly16x4_t val" ], "return_type": { "value": "void" @@ -103441,7 +307152,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.4H" } }, "Architectures": [ @@ -103457,10 +307168,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x3", + "name": "vst1_p16_x2", "arguments": [ - "int16_t * ptr", - "int16x8x3_t val" + "poly16_t * ptr", + "poly16x4x2_t val" ], 
"return_type": { "value": "void" @@ -103469,8 +307180,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -103486,10 +307200,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s16_x4", + "name": "vst1_p16_x3", "arguments": [ - "int16_t * ptr", - "int16x8x4_t val" + "poly16_t * ptr", + "poly16x4x3_t val" ], "return_type": { "value": "void" @@ -103498,8 +307212,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -103515,10 +307235,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32", + "name": "vst1_p16_x4", "arguments": [ - "int32_t * ptr", - "int32x4_t val" + "poly16_t * ptr", + "poly16x4x4_t val" ], "return_type": { "value": "void" @@ -103527,8 +307247,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -103544,10 +307273,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32_x2", + "name": "vst1_p64", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val" + "poly64_t * ptr", + "poly64x1_t val" ], "return_type": { "value": "void" @@ -103557,11 +307286,10 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103573,10 +307301,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32_x3", + "name": "vst1_p64_x2", "arguments": [ - "int32_t * ptr", - "int32x4x3_t val" + "poly64_t * ptr", + "poly64x1x2_t val" ], "return_type": { "value": "void" @@ -103585,12 +307313,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + 
"val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103602,10 +307332,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s32_x4", + "name": "vst1_p64_x3", "arguments": [ - "int32_t * ptr", - "int32x4x4_t val" + "poly64_t * ptr", + "poly64x1x3_t val" ], "return_type": { "value": "void" @@ -103614,12 +307344,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103631,10 +307366,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64", + "name": "vst1_p64_x4", "arguments": [ - "int64_t * ptr", - "int64x2_t val" + "poly64_t * ptr", + "poly64x1x4_t val" ], "return_type": { "value": "void" @@ -103643,12 +307378,20 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", "A32", "A64" ], @@ -103660,10 +307403,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64_x2", + "name": "vst1_p8", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val" + "poly8_t * ptr", + "poly8x8_t val" ], "return_type": { "value": "void" @@ -103673,7 +307416,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8B" } }, "Architectures": [ @@ -103689,10 +307432,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s64_x3", + "name": "vst1_p8_x2", "arguments": [ - "int64_t * ptr", - "int64x2x3_t val" + "poly8_t * ptr", + "poly8x8x2_t val" ], "return_type": { "value": "void" @@ -103701,8 +307444,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -103718,10 +307464,10 @@ }, { "SIMD_ISA": "Neon", - "name": 
"vst1q_s64_x4", + "name": "vst1_p8_x3", "arguments": [ - "int64_t * ptr", - "int64x2x4_t val" + "poly8_t * ptr", + "poly8x8x3_t val" ], "return_type": { "value": "void" @@ -103730,8 +307476,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -103747,10 +307499,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8", + "name": "vst1_p8_x4", "arguments": [ - "int8_t * ptr", - "int8x16_t val" + "poly8_t * ptr", + "poly8x8x4_t val" ], "return_type": { "value": "void" @@ -103759,8 +307511,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -103776,10 +307537,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x2", + "name": "vst1_s16", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val" + "int16_t * ptr", + "int16x4_t val" ], "return_type": { "value": "void" @@ -103789,7 +307550,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -103805,10 +307566,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x3", + "name": "vst1_s16_x2", "arguments": [ - "int8_t * ptr", - "int8x16x3_t val" + "int16_t * ptr", + "int16x4x2_t val" ], "return_type": { "value": "void" @@ -103817,8 +307578,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -103834,10 +307598,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_s8_x4", + "name": "vst1_s16_x3", "arguments": [ - "int8_t * ptr", - "int8x16x4_t val" + "int16_t * ptr", + "int16x4x3_t val" ], "return_type": { "value": "void" @@ -103846,8 +307610,14 @@ "ptr": { "register": "Xn" 
}, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -103863,10 +307633,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16", + "name": "vst1_s16_x4", "arguments": [ - "uint16_t * ptr", - "uint16x8_t val" + "int16_t * ptr", + "int16x4x4_t val" ], "return_type": { "value": "void" @@ -103875,8 +307645,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -103892,10 +307671,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x2", + "name": "vst1_s32", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val" + "int32_t * ptr", + "int32x2_t val" ], "return_type": { "value": "void" @@ -103905,7 +307684,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.2S" } }, "Architectures": [ @@ -103921,10 +307700,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x3", + "name": "vst1_s32_x2", "arguments": [ - "uint16_t * ptr", - "uint16x8x3_t val" + "int32_t * ptr", + "int32x2x2_t val" ], "return_type": { "value": "void" @@ -103933,8 +307712,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -103950,10 +307732,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u16_x4", + "name": "vst1_s32_x3", "arguments": [ - "uint16_t * ptr", - "uint16x8x4_t val" + "int32_t * ptr", + "int32x2x3_t val" ], "return_type": { "value": "void" @@ -103962,8 +307744,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -103979,10 
+307767,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32", + "name": "vst1_s32_x4", "arguments": [ - "uint32_t * ptr", - "uint32x4_t val" + "int32_t * ptr", + "int32x2x4_t val" ], "return_type": { "value": "void" @@ -103991,8 +307779,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -104008,10 +307805,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x2", + "name": "vst1_s64", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val" + "int64_t * ptr", + "int64x1_t val" ], "return_type": { "value": "void" @@ -104021,7 +307818,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.1D" } }, "Architectures": [ @@ -104037,10 +307834,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x3", + "name": "vst1_s64_x2", "arguments": [ - "uint32_t * ptr", - "uint32x4x3_t val" + "int64_t * ptr", + "int64x1x2_t val" ], "return_type": { "value": "void" @@ -104049,8 +307846,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -104066,10 +307866,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u32_x4", + "name": "vst1_s64_x3", "arguments": [ - "uint32_t * ptr", - "uint32x4x4_t val" + "int64_t * ptr", + "int64x1x3_t val" ], "return_type": { "value": "void" @@ -104078,8 +307878,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -104095,10 +307901,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64", + "name": "vst1_s64_x4", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val" + "int64_t * ptr", + "int64x1x4_t val" ], "return_type": { "value": 
"void" @@ -104107,8 +307913,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -104124,10 +307939,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x2", + "name": "vst1_s8", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val" + "int8_t * ptr", + "int8x8_t val" ], "return_type": { "value": "void" @@ -104137,7 +307952,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8B" } }, "Architectures": [ @@ -104153,10 +307968,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x3", + "name": "vst1_s8_x2", "arguments": [ - "uint64_t * ptr", - "uint64x2x3_t val" + "int8_t * ptr", + "int8x8x2_t val" ], "return_type": { "value": "void" @@ -104165,8 +307980,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -104182,10 +308000,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u64_x4", + "name": "vst1_s8_x3", "arguments": [ - "uint64_t * ptr", - "uint64x2x4_t val" + "int8_t * ptr", + "int8x8x3_t val" ], "return_type": { "value": "void" @@ -104194,8 +308012,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -104211,10 +308035,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8", + "name": "vst1_s8_x4", "arguments": [ - "uint8_t * ptr", - "uint8x16_t val" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { "value": "void" @@ -104223,8 +308047,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": 
"Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -104240,10 +308073,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x2", + "name": "vst1_u16", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val" + "uint16_t * ptr", + "uint16x4_t val" ], "return_type": { "value": "void" @@ -104253,7 +308086,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4H" } }, "Architectures": [ @@ -104269,10 +308102,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x3", + "name": "vst1_u16_x2", "arguments": [ - "uint8_t * ptr", - "uint8x16x3_t val" + "uint16_t * ptr", + "uint16x4x2_t val" ], "return_type": { "value": "void" @@ -104281,8 +308114,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -104298,10 +308134,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst1q_u8_x4", + "name": "vst1_u16_x3", "arguments": [ - "uint8_t * ptr", - "uint8x16x4_t val" + "uint16_t * ptr", + "uint16x4x3_t val" ], "return_type": { "value": "void" @@ -104310,8 +308146,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -104327,10 +308169,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_f16", + "name": "vst1_u16_x4", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val" + "uint16_t * ptr", + "uint16x4x4_t val" ], "return_type": { "value": "void" @@ -104339,8 +308181,17 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -104350,16 +308201,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_f32", + "name": "vst1_u32", 
"arguments": [ - "float32_t * ptr", - "float32x2x2_t val" + "uint32_t * ptr", + "uint32x2_t val" ], "return_type": { "value": "void" @@ -104369,7 +308220,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.2S" } }, "Architectures": [ @@ -104379,16 +308230,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_f64", + "name": "vst1_u32_x2", "arguments": [ - "float64_t * ptr", - "float64x1x2_t val" + "uint32_t * ptr", + "uint32x2x2_t val" ], "return_type": { "value": "void" @@ -104397,11 +308248,16 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -104412,25 +308268,26 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f16", + "name": "vst1_u32_x3", "arguments": [ - "float16_t * ptr", - "float16x4x2_t val", - "const int lane" + "uint32_t * ptr", + "uint32x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -104440,31 +308297,35 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f32", + "name": "vst1_u32_x4", "arguments": [ - "float32_t * ptr", - "float32x2x2_t val", - "const int lane" + "uint32_t * ptr", + "uint32x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -104474,63 +308335,58 @@ ], 
"instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_f64", + "name": "vst1_u64", "arguments": [ - "float64_t * ptr", - "float64x1x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.1D" + "register": "Vt.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p16", + "name": "vst1_u64_x2", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -104540,63 +308396,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p64", + "name": "vst1_u64_x3", "arguments": [ - "poly64_t * ptr", - "poly64x1x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_p8", + "name": "vst1_u64_x4", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val", - "const int lane" + "uint64_t * ptr", + "uint64x1x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": 
"Vt2.8B" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -104606,31 +308469,26 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s16", + "name": "vst1_u8", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.8B" } }, "Architectures": [ @@ -104640,31 +308498,29 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s32", + "name": "vst1_u8_x2", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2S" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -104674,63 +308530,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s64", + "name": "vst1_u8_x3", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_s8", + "name": "vst1_u8_x4", "arguments": [ - "int8_t * 
ptr", - "int8x8x2_t val", - "const int lane" + "uint8_t * ptr", + "uint8x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -104740,31 +308603,26 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u16", + "name": "vst1q_f16", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.8H" } }, "Architectures": [ @@ -104774,31 +308632,29 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u32", + "name": "vst1q_f16_x2", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -104808,63 +308664,70 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u64", + "name": "vst1q_f16_x3", "arguments": [ - "uint64_t * ptr", - "uint64x1x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": 
"Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_lane_u8", + "name": "vst1q_f16_x4", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val", - "const int lane" + "float16_t * ptr", + "float16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -104874,16 +308737,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_p16", + "name": "vst1q_f32", "arguments": [ - "poly16_t * ptr", - "poly16x4x2_t val" + "float32_t * ptr", + "float32x4_t val" ], "return_type": { "value": "void" @@ -104893,7 +308756,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4H" + "register": "Vt.4S" } }, "Architectures": [ @@ -104903,16 +308766,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_p64", + "name": "vst1q_f32_x2", "arguments": [ - "poly64_t * ptr", - "poly64x1x2_t val" + "float32_t * ptr", + "float32x4x2_t val" ], "return_type": { "value": "void" @@ -104921,11 +308784,15 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -104937,10 +308804,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_p8", + "name": "vst1q_f32_x3", "arguments": [ - "poly8_t * ptr", - "poly8x8x2_t val" + "float32_t * ptr", + "float32x4x3_t val" ], "return_type": { "value": "void" @@ -104949,8 +308816,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.4S" + }, + 
"val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ @@ -104960,16 +308833,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s16", + "name": "vst1q_f32_x4", "arguments": [ - "int16_t * ptr", - "int16x4x2_t val" + "float32_t * ptr", + "float32x4x4_t val" ], "return_type": { "value": "void" @@ -104978,8 +308851,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -104989,16 +308871,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s32", + "name": "vst1q_f64", "arguments": [ - "int32_t * ptr", - "int32x2x2_t val" + "float64_t * ptr", + "float64x2_t val" ], "return_type": { "value": "void" @@ -105008,26 +308890,24 @@ "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_s64", + "name": "vst1q_f64_x2", "arguments": [ - "int64_t * ptr", - "int64x1x2_t val" + "float64_t * ptr", + "float64x2x2_t val" ], "return_type": { "value": "void" @@ -105036,13 +308916,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ @@ -105053,10 +308934,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_s8", + "name": "vst1q_f64_x3", "arguments": [ - "int8_t * ptr", - "int8x8x2_t val" + "float64_t * ptr", + "float64x2x3_t val" ], "return_type": { "value": "void" @@ -105065,27 +308946,31 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": 
"Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u16", + "name": "vst1q_f64_x4", "arguments": [ - "uint16_t * ptr", - "uint16x4x2_t val" + "float64_t * ptr", + "float64x2x4_t val" ], "return_type": { "value": "void" @@ -105094,37 +308979,49 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u32", + "name": "vst1q_lane_f16", "arguments": [ - "uint32_t * ptr", - "uint32x2x2_t val" + "float16_t * ptr", + "float16x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2S" + "register": "Vt.8H" } }, "Architectures": [ @@ -105134,26 +309031,31 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2_u64", + "name": "vst1q_lane_f32", "arguments": [ - "uint64_t * ptr", - "uint64x1x2_t val" + "float32_t * ptr", + "float32x4_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 3 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.1D" + "register": "Vt.4S" } }, "Architectures": [ @@ -105169,49 +309071,57 @@ }, { "SIMD_ISA": "Neon", - "name": "vst2_u8", + "name": "vst1q_lane_f64", "arguments": [ - "uint8_t * ptr", - "uint8x8x2_t val" + "float64_t * ptr", + "float64x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": 
"Vt2.8B" + "register": "Vt.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f16", + "name": "vst1q_lane_p16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val" + "poly16_t * ptr", + "poly16x8_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 7 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105221,72 +309131,83 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f32", + "name": "vst1q_lane_p64", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val" + "poly64_t * ptr", + "poly64x2_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_f64", + "name": "vst1q_lane_p8", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val" + "poly8_t * ptr", + "poly8x16_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 15 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_f16", + "name": "vst1q_lane_s16", "arguments": [ - "float16_t * ptr", - "float16x8x2_t val", + "int16_t * ptr", + "int16x8_t val", "const int lane" ], "return_type": { @@ -105301,7 +309222,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105311,16 +309232,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vst2q_lane_f32", + "name": "vst1q_lane_s32", "arguments": [ - "float32_t * ptr", - "float32x4x2_t val", + "int32_t * ptr", + "int32x4_t val", "const int lane" ], "return_type": { @@ -105335,7 +309256,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.4S" } }, "Architectures": [ @@ -105345,16 +309266,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_f64", + "name": "vst1q_lane_s64", "arguments": [ - "float64_t * ptr", - "float64x2x2_t val", + "int64_t * ptr", + "int64x2_t val", "const int lane" ], "return_type": { @@ -105363,30 +309284,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 2 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p16", + "name": "vst1q_lane_s8", "arguments": [ - "poly16_t * ptr", - "poly16x8x2_t val", + "int8_t * ptr", + "int8x16_t val", "const int lane" ], "return_type": { @@ -105395,13 +309318,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.16B" } }, "Architectures": [ @@ -105411,16 +309334,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p64", + "name": "vst1q_lane_u16", "arguments": [ - "poly64_t * ptr", - "poly64x2x2_t val", + "uint16_t * ptr", + "uint16x8_t val", "const int lane" ], "return_type": { @@ -105429,30 +309352,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.2D" + "register": "Vt.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_p8", + "name": "vst1q_lane_u32", "arguments": [ - "poly8_t * 
ptr", - "poly8x16x2_t val", + "uint32_t * ptr", + "uint32x4_t val", "const int lane" ], "return_type": { @@ -105461,30 +309386,32 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 3 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s16", + "name": "vst1q_lane_u64", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val", + "uint64_t * ptr", + "uint64x2_t val", "const int lane" ], "return_type": { @@ -105493,13 +309420,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.2D" } }, "Architectures": [ @@ -105509,16 +309436,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s32", + "name": "vst1q_lane_u8", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val", + "uint8_t * ptr", + "uint8x16_t val", "const int lane" ], "return_type": { @@ -105527,13 +309454,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 15 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.4S" + "register": "Vt.16B" } }, "Architectures": [ @@ -105543,31 +309470,35 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s64", + "name": "vst1q_mf8_x4", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val", - "const int lane" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -105575,62 +309506,57 @@ 
], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_s8", + "name": "vst1q_p16", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u16", + "name": "vst1q_p16_x2", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105641,31 +309567,32 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u32", + "name": "vst1q_p16_x3", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -105675,80 +309602,82 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u64", + "name": "vst1q_p16_x4", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val", - "const int lane" + "poly16_t * ptr", + "poly16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": 
"Vt2.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_lane_u8", + "name": "vst1q_p64", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val", - "const int lane" + "poly64_t * ptr", + "poly64x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 15 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt2.16B" + "register": "Vt.2D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p16", + "name": "vst1q_p64_x2", "arguments": [ - "poly16_t * ptr", - "poly16x8x2_t val" + "poly64_t * ptr", + "poly64x2x2_t val" ], "return_type": { "value": "void" @@ -105757,27 +309686,29 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.8H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p64", + "name": "vst1q_p64_x3", "arguments": [ "poly64_t * ptr", - "poly64x2x2_t val" + "poly64x2x3_t val" ], "return_type": { "value": "void" @@ -105786,25 +309717,33 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_p8", + "name": "vst1q_p64_x4", "arguments": [ - "poly8_t * ptr", - "poly8x16x2_t val" + "poly64_t * ptr", + "poly64x2x4_t val" ], "return_type": { "value": "void" @@ -105813,27 +309752,35 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.16B" + 
"val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s16", + "name": "vst1q_p8", "arguments": [ - "int16_t * ptr", - "int16x8x2_t val" + "poly8_t * ptr", + "poly8x16_t val" ], "return_type": { "value": "void" @@ -105843,7 +309790,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.16B" } }, "Architectures": [ @@ -105853,16 +309800,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s32", + "name": "vst1q_p8_x2", "arguments": [ - "int32_t * ptr", - "int32x4x2_t val" + "poly8_t * ptr", + "poly8x16x2_t val" ], "return_type": { "value": "void" @@ -105871,8 +309818,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -105882,16 +309832,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s64", + "name": "vst1q_p8_x3", "arguments": [ - "int64_t * ptr", - "int64x2x2_t val" + "poly8_t * ptr", + "poly8x16x3_t val" ], "return_type": { "value": "void" @@ -105900,25 +309850,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_s8", + "name": "vst1q_p8_x4", "arguments": [ - "int8_t * ptr", - "int8x16x2_t val" + "poly8_t * ptr", + "poly8x16x4_t val" ], "return_type": { "value": "void" @@ -105927,8 +309885,17 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": 
"Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -105938,16 +309905,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u16", + "name": "vst1q_s16", "arguments": [ - "uint16_t * ptr", - "uint16x8x2_t val" + "int16_t * ptr", + "int16x8_t val" ], "return_type": { "value": "void" @@ -105957,7 +309924,7 @@ "register": "Xn" }, "val": { - "register": "Vt2.8H" + "register": "Vt.8H" } }, "Architectures": [ @@ -105967,16 +309934,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u32", + "name": "vst1q_s16_x2", "arguments": [ - "uint32_t * ptr", - "uint32x4x2_t val" + "int16_t * ptr", + "int16x8x2_t val" ], "return_type": { "value": "void" @@ -105985,8 +309952,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.4S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -105996,16 +309966,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u64", + "name": "vst1q_s16_x3", "arguments": [ - "uint64_t * ptr", - "uint64x2x2_t val" + "int16_t * ptr", + "int16x8x3_t val" ], "return_type": { "value": "void" @@ -106014,25 +309984,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst2q_u8", + "name": "vst1q_s16_x4", "arguments": [ - "uint8_t * ptr", - "uint8x16x2_t val" + "int16_t * ptr", + "int16x8x4_t val" ], "return_type": { "value": "void" @@ -106041,8 +310019,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt2.16B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + 
"register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -106052,16 +310039,16 @@ ], "instructions": [ [ - "ST2" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f16", + "name": "vst1q_s32", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val" + "int32_t * ptr", + "int32x4_t val" ], "return_type": { "value": "void" @@ -106071,7 +310058,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + "register": "Vt.4S" } }, "Architectures": [ @@ -106081,16 +310068,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f32", + "name": "vst1q_s32_x2", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val" + "int32_t * ptr", + "int32x4x2_t val" ], "return_type": { "value": "void" @@ -106099,8 +310086,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -106110,16 +310100,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_f64", + "name": "vst1q_s32_x3", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val" + "int32_t * ptr", + "int32x4x3_t val" ], "return_type": { "value": "void" @@ -106128,11 +310118,19 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ @@ -106143,25 +310141,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f16", + "name": "vst1q_s32_x4", "arguments": [ - "float16_t * ptr", - "float16x4x3_t val", - "const int lane" + "int32_t * ptr", + "int32x4x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + 
"register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -106171,31 +310173,26 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f32", + "name": "vst1q_s64", "arguments": [ - "float32_t * ptr", - "float32x2x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.2S" + "register": "Vt.2D" } }, "Architectures": [ @@ -106205,63 +310202,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_f64", + "name": "vst1q_s64_x2", "arguments": [ - "float64_t * ptr", - "float64x1x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p16", + "name": "vst1q_s64_x3", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val", - "const int lane" + "int64_t * ptr", + "int64x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -106271,63 +310269,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p64", + "name": "vst1q_s64_x4", "arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val", - "const int lane" + "int64_t 
* ptr", + "int64x2x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_p8", + "name": "vst1q_s8", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.16B" } }, "Architectures": [ @@ -106337,31 +310336,29 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s16", + "name": "vst1q_s8_x2", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -106371,31 +310368,32 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s32", + "name": "vst1q_s8_x3", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" 
} }, "Architectures": [ @@ -106405,63 +310403,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s64", + "name": "vst1q_s8_x4", "arguments": [ - "int64_t * ptr", - "int64x1x3_t val", - "const int lane" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_s8", + "name": "vst1q_u16", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.8H" } }, "Architectures": [ @@ -106471,31 +310470,29 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u16", + "name": "vst1q_u16_x2", "arguments": [ "uint16_t * ptr", - "uint16x4x3_t val", - "const int lane" + "uint16x8x2_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 3 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -106505,31 +310502,32 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u32", + "name": "vst1q_u16_x3", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": 
{ - "lane": { - "minimum": 0, - "maximum": 1 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -106539,63 +310537,64 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u64", + "name": "vst1q_u16_x4", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val", - "const int lane" + "uint16_t * ptr", + "uint16x8x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_lane_u8", + "name": "vst1q_u32", "arguments": [ - "uint8_t * ptr", - "uint8x8x3_t val", - "const int lane" + "uint32_t * ptr", + "uint32x4_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 7 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt3.8B" + "register": "Vt.4S" } }, "Architectures": [ @@ -106605,16 +310604,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_p16", + "name": "vst1q_u32_x2", "arguments": [ - "poly16_t * ptr", - "poly16x4x3_t val" + "uint32_t * ptr", + "uint32x4x2_t val" ], "return_type": { "value": "void" @@ -106623,8 +310622,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -106634,16 +310636,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_p64", + "name": 
"vst1q_u32_x3", "arguments": [ - "poly64_t * ptr", - "poly64x1x3_t val" + "uint32_t * ptr", + "uint32x4x3_t val" ], "return_type": { "value": "void" @@ -106652,11 +310654,18 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], @@ -106668,10 +310677,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_p8", + "name": "vst1q_u32_x4", "arguments": [ - "poly8_t * ptr", - "poly8x8x3_t val" + "uint32_t * ptr", + "uint32x4x4_t val" ], "return_type": { "value": "void" @@ -106680,8 +310689,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -106691,16 +310709,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s16", + "name": "vst1q_u64", "arguments": [ - "int16_t * ptr", - "int16x4x3_t val" + "uint64_t * ptr", + "uint64x2_t val" ], "return_type": { "value": "void" @@ -106710,7 +310728,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + "register": "Vt.2D" } }, "Architectures": [ @@ -106720,16 +310738,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s32", + "name": "vst1q_u64_x2", "arguments": [ - "int32_t * ptr", - "int32x2x3_t val" + "uint64_t * ptr", + "uint64x2x2_t val" ], "return_type": { "value": "void" @@ -106738,8 +310756,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -106749,16 +310770,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_s64", + "name": "vst1q_u64_x3", "arguments": [ - "int64_t * ptr", - 
"int64x1x3_t val" + "uint64_t * ptr", + "uint64x2x3_t val" ], "return_type": { "value": "void" @@ -106767,8 +310788,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -106784,10 +310811,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_s8", + "name": "vst1q_u64_x4", "arguments": [ - "int8_t * ptr", - "int8x8x3_t val" + "uint64_t * ptr", + "uint64x2x4_t val" ], "return_type": { "value": "void" @@ -106796,8 +310823,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -106807,16 +310843,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u16", + "name": "vst1q_u8", "arguments": [ - "uint16_t * ptr", - "uint16x4x3_t val" + "uint8_t * ptr", + "uint8x16_t val" ], "return_type": { "value": "void" @@ -106826,7 +310862,7 @@ "register": "Xn" }, "val": { - "register": "Vt3.4H" + "register": "Vt.16B" } }, "Architectures": [ @@ -106836,16 +310872,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u32", + "name": "vst1q_u8_x2", "arguments": [ - "uint32_t * ptr", - "uint32x2x3_t val" + "uint8_t * ptr", + "uint8x16x2_t val" ], "return_type": { "value": "void" @@ -106854,8 +310890,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2S" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -106865,16 +310904,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3_u64", + "name": "vst1q_u8_x3", "arguments": [ - "uint64_t * ptr", - "uint64x1x3_t val" + "uint8_t * ptr", + "uint8x16x3_t val" ], "return_type": { 
"value": "void" @@ -106883,8 +310922,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.1D" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -106900,10 +310945,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vst3_u8", + "name": "vst1q_u8_x4", "arguments": [ "uint8_t * ptr", - "uint8x8x3_t val" + "uint8x16x4_t val" ], "return_type": { "value": "void" @@ -106912,8 +310957,17 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -106923,16 +310977,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_f16", + "name": "vst2_f16", "arguments": [ "float16_t * ptr", - "float16x8x3_t val" + "float16x4x2_t val" ], "return_type": { "value": "void" @@ -106941,8 +310995,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -106952,16 +311009,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_f32", + "name": "vst2_f32", "arguments": [ "float32_t * ptr", - "float32x4x3_t val" + "float32x2x2_t val" ], "return_type": { "value": "void" @@ -106970,8 +311027,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -106981,16 +311041,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_f64", + "name": "vst2_f64", "arguments": [ "float64_t * ptr", - "float64x2x3_t val" + "float64x1x2_t val" ], "return_type": { "value": "void" @@ -106999,8 +311059,11 @@ "ptr": { "register": "Xn" }, - "val": 
{ - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107008,16 +311071,16 @@ ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f16", + "name": "vst2_lane_f16", "arguments": [ "float16_t * ptr", - "float16x8x3_t val", + "float16x4x2_t val", "const int lane" ], "return_type": { @@ -107026,13 +311089,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107042,16 +311108,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f32", + "name": "vst2_lane_f32", "arguments": [ "float32_t * ptr", - "float32x4x3_t val", + "float32x2x2_t val", "const int lane" ], "return_type": { @@ -107060,13 +311126,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107076,16 +311145,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_f64", + "name": "vst2_lane_f64", "arguments": [ "float64_t * ptr", - "float64x2x3_t val", + "float64x1x2_t val", "const int lane" ], "return_type": { @@ -107094,13 +311163,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107108,16 +311180,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p16", + "name": "vst2_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x3_t val", 
+ "poly16x4x2_t val", "const int lane" ], "return_type": { @@ -107126,13 +311198,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107142,16 +311217,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p64", + "name": "vst2_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x3_t val", + "poly64x1x2_t val", "const int lane" ], "return_type": { @@ -107160,13 +311235,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107174,16 +311252,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_p8", + "name": "vst2_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x3_t val", + "poly8x8x2_t val", "const int lane" ], "return_type": { @@ -107192,13 +311270,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107208,16 +311289,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s16", + "name": "vst2_lane_s16", "arguments": [ "int16_t * ptr", - "int16x8x3_t val", + "int16x4x2_t val", "const int lane" ], "return_type": { @@ -107226,13 +311307,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ 
-107242,16 +311326,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s32", + "name": "vst2_lane_s32", "arguments": [ "int32_t * ptr", - "int32x4x3_t val", + "int32x2x2_t val", "const int lane" ], "return_type": { @@ -107260,13 +311344,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107276,16 +311363,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s64", + "name": "vst2_lane_s64", "arguments": [ "int64_t * ptr", - "int64x2x3_t val", + "int64x1x2_t val", "const int lane" ], "return_type": { @@ -107294,13 +311381,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107308,16 +311398,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_s8", + "name": "vst2_lane_s8", "arguments": [ "int8_t * ptr", - "int8x16x3_t val", + "int8x8x2_t val", "const int lane" ], "return_type": { @@ -107326,13 +311416,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107342,16 +311435,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u16", + "name": "vst2_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x3_t val", + "uint16x4x2_t val", "const int lane" ], "return_type": { @@ -107360,13 +311453,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + 
"maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107376,16 +311472,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u32", + "name": "vst2_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x3_t val", + "uint32x2x2_t val", "const int lane" ], "return_type": { @@ -107394,13 +311490,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107410,16 +311509,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u64", + "name": "vst2_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x3_t val", + "uint64x1x2_t val", "const int lane" ], "return_type": { @@ -107428,13 +311527,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ @@ -107442,16 +311544,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_lane_u8", + "name": "vst2_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x3_t val", + "uint8x8x2_t val", "const int lane" ], "return_type": { @@ -107460,13 +311562,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107476,16 +311581,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p16", + "name": "vst2_p16", "arguments": [ 
"poly16_t * ptr", - "poly16x8x3_t val" + "poly16x4x2_t val" ], "return_type": { "value": "void" @@ -107494,8 +311599,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107505,16 +311613,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p64", + "name": "vst2_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x3_t val" + "poly64x1x2_t val" ], "return_type": { "value": "void" @@ -107523,25 +311631,29 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_p8", + "name": "vst2_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x3_t val" + "poly8x8x2_t val" ], "return_type": { "value": "void" @@ -107550,8 +311662,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107561,16 +311676,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s16", + "name": "vst2_s16", "arguments": [ "int16_t * ptr", - "int16x8x3_t val" + "int16x4x2_t val" ], "return_type": { "value": "void" @@ -107579,8 +311694,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107590,16 +311708,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s32", + "name": "vst2_s32", "arguments": [ "int32_t * ptr", - "int32x4x3_t val" + "int32x2x2_t val" ], "return_type": { "value": "void" @@ -107608,8 +311726,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + 
"register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107619,16 +311740,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s64", + "name": "vst2_s64", "arguments": [ "int64_t * ptr", - "int64x2x3_t val" + "int64x1x2_t val" ], "return_type": { "value": "void" @@ -107637,25 +311758,30 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_s8", + "name": "vst2_s8", "arguments": [ "int8_t * ptr", - "int8x16x3_t val" + "int8x8x2_t val" ], "return_type": { "value": "void" @@ -107664,8 +311790,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107675,16 +311804,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u16", + "name": "vst2_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x3_t val" + "uint16x4x2_t val" ], "return_type": { "value": "void" @@ -107693,8 +311822,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" } }, "Architectures": [ @@ -107704,16 +311836,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u32", + "name": "vst2_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x3_t val" + "uint32x2x2_t val" ], "return_type": { "value": "void" @@ -107722,8 +311854,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" } }, "Architectures": [ @@ -107733,16 +311868,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vst3q_u64", + "name": "vst2_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x3_t val" + "uint64x1x2_t val" ], "return_type": { "value": "void" @@ -107751,25 +311886,30 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST3" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst3q_u8", + "name": "vst2_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x3_t val" + "uint8x8x2_t val" ], "return_type": { "value": "void" @@ -107778,8 +311918,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt3.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" } }, "Architectures": [ @@ -107789,16 +311932,16 @@ ], "instructions": [ [ - "ST3" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f16", + "name": "vst2q_f16", "arguments": [ "float16_t * ptr", - "float16x4x4_t val" + "float16x8x2_t val" ], "return_type": { "value": "void" @@ -107807,8 +311950,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -107818,16 +311964,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f32", + "name": "vst2q_f32", "arguments": [ "float32_t * ptr", - "float32x2x4_t val" + "float32x4x2_t val" ], "return_type": { "value": "void" @@ -107836,8 +311982,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -107847,16 +311996,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_f64", + "name": "vst2q_f64", "arguments": [ "float64_t * ptr", - "float64x1x4_t val" + "float64x2x2_t val" ], "return_type": { "value": "void" @@ -107865,8 +312014,11 @@ "ptr": { 
"register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -107874,16 +312026,16 @@ ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f16", + "name": "vst2q_lane_f16", "arguments": [ "float16_t * ptr", - "float16x4x4_t val", + "float16x8x2_t val", "const int lane" ], "return_type": { @@ -107892,13 +312044,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -107908,16 +312063,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f32", + "name": "vst2q_lane_f32", "arguments": [ "float32_t * ptr", - "float32x2x4_t val", + "float32x4x2_t val", "const int lane" ], "return_type": { @@ -107926,13 +312081,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -107942,16 +312100,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_f64", + "name": "vst2q_lane_f64", "arguments": [ "float64_t * ptr", - "float64x1x4_t val", + "float64x2x2_t val", "const int lane" ], "return_type": { @@ -107960,13 +312118,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -107974,16 +312135,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p16", + "name": "vst2q_lane_p16", "arguments": [ "poly16_t 
* ptr", - "poly16x4x4_t val", + "poly16x8x2_t val", "const int lane" ], "return_type": { @@ -107992,13 +312153,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108008,16 +312172,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p64", + "name": "vst2q_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x1x4_t val", + "poly64x2x2_t val", "const int lane" ], "return_type": { @@ -108026,13 +312190,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -108040,16 +312207,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_p8", + "name": "vst2q_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x8x4_t val", + "poly8x16x2_t val", "const int lane" ], "return_type": { @@ -108058,32 +312225,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s16", + "name": "vst2q_lane_s16", "arguments": [ "int16_t * ptr", - "int16x4x4_t val", + "int16x8x2_t val", "const int lane" ], "return_type": { @@ -108092,13 +312260,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, 
"Architectures": [ @@ -108108,16 +312279,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s32", + "name": "vst2q_lane_s32", "arguments": [ "int32_t * ptr", - "int32x2x4_t val", + "int32x4x2_t val", "const int lane" ], "return_type": { @@ -108126,13 +312297,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108142,16 +312316,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s64", + "name": "vst2q_lane_s64", "arguments": [ "int64_t * ptr", - "int64x1x4_t val", + "int64x2x2_t val", "const int lane" ], "return_type": { @@ -108160,13 +312334,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -108174,16 +312351,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_s8", + "name": "vst2q_lane_s8", "arguments": [ "int8_t * ptr", - "int8x8x4_t val", + "int8x16x2_t val", "const int lane" ], "return_type": { @@ -108192,32 +312369,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u16", + "name": "vst2q_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x4x4_t val", + "uint16x8x2_t val", "const int lane" ], "return_type": { @@ -108226,13 +312404,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - 
"maximum": 3 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108242,16 +312423,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u32", + "name": "vst2q_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x2x4_t val", + "uint32x4x2_t val", "const int lane" ], "return_type": { @@ -108260,13 +312441,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108276,16 +312460,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u64", + "name": "vst2q_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x1x4_t val", + "uint64x2x2_t val", "const int lane" ], "return_type": { @@ -108294,13 +312478,16 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ @@ -108308,16 +312495,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_lane_u8", + "name": "vst2q_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x8x4_t val", + "uint8x16x2_t val", "const int lane" ], "return_type": { @@ -108326,32 +312513,33 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 15 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p16", + "name": "vst2q_p16", 
"arguments": [ "poly16_t * ptr", - "poly16x4x4_t val" + "poly16x8x2_t val" ], "return_type": { "value": "void" @@ -108360,8 +312548,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108371,16 +312562,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p64", + "name": "vst2q_p64", "arguments": [ "poly64_t * ptr", - "poly64x1x4_t val" + "poly64x2x2_t val" ], "return_type": { "value": "void" @@ -108389,26 +312580,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_p8", + "name": "vst2q_p8", "arguments": [ "poly8_t * ptr", - "poly8x8x4_t val" + "poly8x16x2_t val" ], "return_type": { "value": "void" @@ -108417,8 +312610,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108428,16 +312624,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s16", + "name": "vst2q_s16", "arguments": [ "int16_t * ptr", - "int16x4x4_t val" + "int16x8x2_t val" ], "return_type": { "value": "void" @@ -108446,8 +312642,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108457,16 +312656,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s32", + "name": "vst2q_s32", "arguments": [ "int32_t * ptr", - "int32x2x4_t val" + "int32x4x2_t val" ], "return_type": { "value": "void" @@ -108475,8 +312674,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + 
"val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108486,16 +312688,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s64", + "name": "vst2q_s64", "arguments": [ "int64_t * ptr", - "int64x1x4_t val" + "int64x2x2_t val" ], "return_type": { "value": "void" @@ -108504,27 +312706,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_s8", + "name": "vst2q_s8", "arguments": [ "int8_t * ptr", - "int8x8x4_t val" + "int8x16x2_t val" ], "return_type": { "value": "void" @@ -108533,8 +312736,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108544,16 +312750,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u16", + "name": "vst2q_u16", "arguments": [ "uint16_t * ptr", - "uint16x4x4_t val" + "uint16x8x2_t val" ], "return_type": { "value": "void" @@ -108562,8 +312768,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4H" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" } }, "Architectures": [ @@ -108573,16 +312782,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u32", + "name": "vst2q_u32", "arguments": [ "uint32_t * ptr", - "uint32x2x4_t val" + "uint32x4x2_t val" ], "return_type": { "value": "void" @@ -108591,8 +312800,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2S" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" } }, "Architectures": [ @@ -108602,16 +312814,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", 
- "name": "vst4_u64", + "name": "vst2q_u64", "arguments": [ "uint64_t * ptr", - "uint64x1x4_t val" + "uint64x2x2_t val" ], "return_type": { "value": "void" @@ -108620,27 +312832,28 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "ST1" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4_u8", + "name": "vst2q_u8", "arguments": [ "uint8_t * ptr", - "uint8x8x4_t val" + "uint8x16x2_t val" ], "return_type": { "value": "void" @@ -108649,8 +312862,11 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8B" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" } }, "Architectures": [ @@ -108660,16 +312876,16 @@ ], "instructions": [ [ - "ST4" + "ST2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f16", + "name": "vst3_f16", "arguments": [ "float16_t * ptr", - "float16x8x4_t val" + "float16x4x3_t val" ], "return_type": { "value": "void" @@ -108678,8 +312894,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108689,16 +312911,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f32", + "name": "vst3_f32", "arguments": [ "float32_t * ptr", - "float32x4x4_t val" + "float32x2x3_t val" ], "return_type": { "value": "void" @@ -108707,8 +312929,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -108718,16 +312946,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_f64", + "name": "vst3_f64", "arguments": [ "float64_t * ptr", - "float64x2x4_t 
val" + "float64x1x3_t val" ], "return_type": { "value": "void" @@ -108736,8 +312964,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -108745,16 +312979,16 @@ ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f16", + "name": "vst3_lane_f16", "arguments": [ "float16_t * ptr", - "float16x8x4_t val", + "float16x4x3_t val", "const int lane" ], "return_type": { @@ -108763,13 +312997,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108779,16 +313019,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f32", + "name": "vst3_lane_f32", "arguments": [ "float32_t * ptr", - "float32x4x4_t val", + "float32x2x3_t val", "const int lane" ], "return_type": { @@ -108797,13 +313037,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -108813,16 +313059,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_f64", + "name": "vst3_lane_f64", "arguments": [ "float64_t * ptr", - "float64x2x4_t val", + "float64x1x3_t val", "const int lane" ], "return_type": { @@ -108831,13 +313077,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + 
"val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -108845,16 +313097,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p16", + "name": "vst3_lane_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x4_t val", + "poly16x4x3_t val", "const int lane" ], "return_type": { @@ -108863,13 +313115,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108879,16 +313137,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p64", + "name": "vst3_lane_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x4_t val", + "poly64x1x3_t val", "const int lane" ], "return_type": { @@ -108897,13 +313155,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -108911,16 +313175,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_p8", + "name": "vst3_lane_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x4_t val", + "poly8x8x3_t val", "const int lane" ], "return_type": { @@ -108929,30 +313193,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vst4q_lane_s16", + "name": "vst3_lane_s16", "arguments": [ "int16_t * ptr", - "int16x8x4_t val", + "int16x4x3_t val", "const int lane" ], "return_type": { @@ -108961,13 +313233,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -108977,16 +313255,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s32", + "name": "vst3_lane_s32", "arguments": [ "int32_t * ptr", - "int32x4x4_t val", + "int32x2x3_t val", "const int lane" ], "return_type": { @@ -108995,13 +313273,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109011,16 +313295,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s64", + "name": "vst3_lane_s64", "arguments": [ "int64_t * ptr", - "int64x2x4_t val", + "int64x1x3_t val", "const int lane" ], "return_type": { @@ -109029,13 +313313,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -109043,16 +313333,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_s8", + "name": "vst3_lane_s8", "arguments": [ "int8_t * ptr", - "int8x16x4_t val", + "int8x8x3_t val", "const int lane" ], "return_type": { @@ -109061,30 +313351,38 @@ "Arguments_Preparation": { "lane": { 
"minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u16", + "name": "vst3_lane_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x4_t val", + "uint16x4x3_t val", "const int lane" ], "return_type": { @@ -109093,13 +313391,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 7 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109109,16 +313413,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u32", + "name": "vst3_lane_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x4_t val", + "uint32x2x3_t val", "const int lane" ], "return_type": { @@ -109127,13 +313431,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 3 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109143,16 +313453,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u64", + "name": "vst3_lane_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x4_t val", + "uint64x1x3_t val", "const int lane" ], "return_type": { @@ -109161,13 +313471,19 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 0 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + 
"val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ @@ -109175,16 +313491,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_lane_u8", + "name": "vst3_lane_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x4_t val", + "uint8x8x3_t val", "const int lane" ], "return_type": { @@ -109193,30 +313509,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 15 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p16", + "name": "vst3_p16", "arguments": [ "poly16_t * ptr", - "poly16x8x4_t val" + "poly16x4x3_t val" ], "return_type": { "value": "void" @@ -109225,8 +313549,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109236,16 +313566,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p64", + "name": "vst3_p64", "arguments": [ "poly64_t * ptr", - "poly64x2x4_t val" + "poly64x1x3_t val" ], "return_type": { "value": "void" @@ -109254,25 +313584,32 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_p8", + "name": "vst3_p8", "arguments": [ "poly8_t * ptr", - "poly8x16x4_t val" + "poly8x8x3_t val" ], "return_type": { "value": "void" @@ -109281,8 +313618,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + 
"register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109292,16 +313635,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s16", + "name": "vst3_s16", "arguments": [ "int16_t * ptr", - "int16x8x4_t val" + "int16x4x3_t val" ], "return_type": { "value": "void" @@ -109310,8 +313653,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109321,16 +313670,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s32", + "name": "vst3_s32", "arguments": [ "int32_t * ptr", - "int32x4x4_t val" + "int32x2x3_t val" ], "return_type": { "value": "void" @@ -109339,8 +313688,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109350,16 +313705,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s64", + "name": "vst3_s64", "arguments": [ "int64_t * ptr", - "int64x2x4_t val" + "int64x1x3_t val" ], "return_type": { "value": "void" @@ -109368,25 +313723,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_s8", + "name": "vst3_s8", "arguments": [ "int8_t * ptr", - "int8x16x4_t val" + "int8x8x3_t val" ], "return_type": { "value": "void" @@ -109395,8 +313758,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + 
"val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109406,16 +313775,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u16", + "name": "vst3_u16", "arguments": [ "uint16_t * ptr", - "uint16x8x4_t val" + "uint16x4x3_t val" ], "return_type": { "value": "void" @@ -109424,8 +313793,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" } }, "Architectures": [ @@ -109435,16 +313810,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u32", + "name": "vst3_u32", "arguments": [ "uint32_t * ptr", - "uint32x4x4_t val" + "uint32x2x3_t val" ], "return_type": { "value": "void" @@ -109453,8 +313828,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" } }, "Architectures": [ @@ -109464,16 +313845,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u64", + "name": "vst3_u64", "arguments": [ "uint64_t * ptr", - "uint64x2x4_t val" + "uint64x1x3_t val" ], "return_type": { "value": "void" @@ -109482,25 +313863,33 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "ST4" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vst4q_u8", + "name": "vst3_u8", "arguments": [ "uint8_t * ptr", - "uint8x16x4_t val" + "uint8x8x3_t val" ], "return_type": { "value": "void" @@ -109509,8 +313898,14 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Vt4.16B" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": 
{ + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" } }, "Architectures": [ @@ -109520,16 +313915,16 @@ ], "instructions": [ [ - "ST4" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstrq_p128", + "name": "vst3q_f16", "arguments": [ - "poly128_t * ptr", - "poly128_t val" + "float16_t * ptr", + "float16x8x3_t val" ], "return_type": { "value": "void" @@ -109538,105 +313933,84 @@ "ptr": { "register": "Xn" }, - "val": { - "register": "Qt" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "STR" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_f64", + "name": "vst3q_f32", "arguments": [ - "float64_t * ptr", - "float64x1_t val", - "const int lane" + "float32_t * ptr", + "float32x4x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STL1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vstl1q_lane_f64", - "arguments": [ - "float64_t * ptr", - "float64x2_t val", - "const int lane" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 + "val.val[0]": { + "register": "Vt.4S" }, - "ptr": { - "register": "Xn" + "val.val[1]": { + "register": "Vt2.4S" }, - "val": { - "register": "Vt.2D" + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_p64", + "name": "vst3q_f64", "arguments": [ - "poly64_t * ptr", - "poly64x1_t val", - "const int lane" + "float64_t * ptr", + "float64x2x3_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { 
"register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109644,16 +314018,16 @@ ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_p64", + "name": "vst3q_lane_f16", "arguments": [ - "poly64_t * ptr", - "poly64x2_t val", + "float16_t * ptr", + "float16x8x3_t val", "const int lane" ], "return_type": { @@ -109662,30 +314036,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.2D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_u64", + "name": "vst3q_lane_f32", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val", + "float32_t * ptr", + "float32x4x3_t val", "const int lane" ], "return_type": { @@ -109694,30 +314076,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 3 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_u64", + "name": "vst3q_lane_f64", "arguments": [ - "uint64_t * ptr", - "uint64x2_t val", + "float64_t * ptr", + "float64x2x3_t val", "const int lane" ], "return_type": { @@ -109731,8 +314121,14 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109740,16 +314136,16 @@ ], "instructions": [ [ 
- "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_s64", + "name": "vst3q_lane_p16", "arguments": [ - "int64_t * ptr", - "int64x1_t val", + "poly16_t * ptr", + "poly16x8x3_t val", "const int lane" ], "return_type": { @@ -109758,30 +314154,38 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 7 }, "ptr": { "register": "Xn" }, - "val": { - "register": "Vt.1D" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1q_lane_s64", + "name": "vst3q_lane_p64", "arguments": [ - "int64_t * ptr", - "int64x2_t val", + "poly64_t * ptr", + "poly64x2x3_t val", "const int lane" ], "return_type": { @@ -109795,8 +314199,14 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -109804,54 +314214,77 @@ ], "instructions": [ [ - "STL1" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f16", + "name": "vst3q_lane_p8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly8_t * ptr", + "poly8x16x3_t val", + "const int lane" ], "return_type": { - "value": "float16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f32", + "name": "vst3q_lane_s16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int16_t * ptr", + "int16x8x3_t val", + "const int lane" ], "return_type": { - 
"value": "float32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -109861,82 +314294,115 @@ ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_f64", + "name": "vst3q_lane_s32", "arguments": [ - "float64x1_t a", - "float64x1_t b" + "int32_t * ptr", + "int32x4x3_t val", + "const int lane" ], "return_type": { - "value": "float64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s16", + "name": "vst3q_lane_s64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int64_t * ptr", + "int64x2x3_t val", + "const int lane" ], "return_type": { - "value": "int16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s32", + "name": "vst3q_lane_s8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "int8_t * ptr", + "int8x16x3_t val", + "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { 
- "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -109946,26 +314412,37 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s64", + "name": "vst3q_lane_u16", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "uint16_t * ptr", + "uint16x8x3_t val", + "const int lane" ], "return_type": { - "value": "int64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Dm" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -109975,26 +314452,37 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_s8", + "name": "vst3q_lane_u32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint32_t * ptr", + "uint32x4x3_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ @@ -110004,55 +314492,75 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u16", + "name": "vst3q_lane_u64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint64_t * ptr", + "uint64x2x3_t val", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 1 }, 
- "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u32", + "name": "vst3q_lane_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint8_t * ptr", + "uint8x16x3_t val", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ @@ -110062,26 +314570,32 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u64", + "name": "vst3q_p16", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "poly16_t * ptr", + "poly16x8x3_t val" ], "return_type": { - "value": "uint64x1_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Dm" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ @@ -110091,141 +314605,170 @@ ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsub_u8", + "name": "vst3q_p64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly64_t * ptr", + "poly64x2x3_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8B" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubd_s64", + "name": "vst3q_p8", "arguments": [ - "int64_t a", - "int64_t b" + "poly8_t * ptr", + "poly8x16x3_t val" ], "return_type": { - "value": "int64_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Dm" + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubd_u64", + "name": "vst3q_s16", "arguments": [ - "uint64_t a", - "uint64_t b" + "int16_t * ptr", + "int16x8x3_t val" ], "return_type": { - "value": "uint64_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Dm" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubh_f16", + "name": "vst3q_s32", "arguments": [ - "float16_t a", - "float16_t b" + "int32_t * ptr", + "int32x4x3_t val" ], "return_type": { - "value": "float16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Hn" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Hm" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s16", + "name": "vst3q_s64", "arguments": [ - "int8x8_t r", - "int16x8_t a", - "int16x8_t b" + "int64_t * ptr", + "int64x2x3_t val" ], "return_type": { - "value": "int8x16_t" + "value": "void" 
}, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.2D" }, - "r": { - "register": "Vd.8B" + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -110233,123 +314776,137 @@ ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s32", + "name": "vst3q_s8", "arguments": [ - "int16x4_t r", - "int32x4_t a", - "int32x4_t b" + "int8_t * ptr", + "int8x16x3_t val" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.16B" }, - "r": { - "register": "Vd.4H" + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_s64", + "name": "vst3q_u16", "arguments": [ - "int32x2_t r", - "int64x2_t a", - "int64x2_t b" + "uint16_t * ptr", + "uint16x8x3_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.8H" }, - "r": { - "register": "Vd.2S" + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u16", + "name": "vst3q_u32", "arguments": [ - "uint8x8_t r", - "uint16x8_t a", - "uint16x8_t b" + "uint32_t * ptr", + "uint32x4x3_t val" ], "return_type": { - "value": "uint8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": 
"Vm.8H" + "val.val[0]": { + "register": "Vt.4S" }, - "r": { - "register": "Vd.8B" + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u32", + "name": "vst3q_u64", "arguments": [ - "uint16x4_t r", - "uint32x4_t a", - "uint32x4_t b" + "uint64_t * ptr", + "uint64x2x3_t val" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2D" }, - "r": { - "register": "Vd.4H" + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" } }, "Architectures": [ @@ -110357,57 +314914,70 @@ ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_high_u64", + "name": "vst3q_u8", "arguments": [ - "uint32x2_t r", - "uint64x2_t a", - "uint64x2_t b" + "uint8_t * ptr", + "uint8x16x3_t val" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.16B" }, - "r": { - "register": "Vd.2S" + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUBHN2" + "ST3" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_s16", + "name": "vst4_f16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "float16_t * ptr", + "float16x4x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": 
"Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -110417,26 +314987,35 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_s32", + "name": "vst4_f32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "float32_t * ptr", + "float32x2x4_t val" ], "return_type": { - "value": "int16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110446,55 +315025,76 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_s64", + "name": "vst4_f64", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "float64_t * ptr", + "float64x1x4_t val" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUBHN" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u16", + "name": "vst4_lane_f16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float16_t * ptr", + "float16x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, 
"Architectures": [ @@ -110504,26 +315104,40 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u32", + "name": "vst4_lane_f32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float32_t * ptr", + "float32x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110533,82 +315147,124 @@ ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubhn_u64", + "name": "vst4_lane_f64", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "float64_t * ptr", + "float64x1x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.2D" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SUBHN" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s16", + "name": "vst4_lane_p16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "poly16_t * ptr", + "poly16x4x4_t val", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, 
+ "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s32", + "name": "vst4_lane_p64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "poly64_t * ptr", + "poly64x1x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -110616,107 +315272,169 @@ ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_s8", + "name": "vst4_lane_p8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly8_t * ptr", + "poly8x8x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u16", + "name": "vst4_lane_s16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int16_t * ptr", + "int16x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + 
"val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u32", + "name": "vst4_lane_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32_t * ptr", + "int32x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_high_u8", + "name": "vst4_lane_s64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int64_t * ptr", + "int64x1x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -110724,26 +315442,40 @@ ], "instructions": [ [ - "USUBL2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s16", + "name": "vst4_lane_s8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int8_t * ptr", + "int8x8x4_t val", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + 
"maximum": 7 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -110753,26 +315485,40 @@ ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s32", + "name": "vst4_lane_u16", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint16_t * ptr", + "uint16x4x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -110782,26 +315528,40 @@ ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_s8", + "name": "vst4_lane_u32", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint32_t * ptr", + "uint32x2x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -110811,55 +315571,81 @@ ], "instructions": [ [ - "SSUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u16", + "name": "vst4_lane_u64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "uint64_t * ptr", + "uint64x1x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "lane": { + "minimum": 0, + "maximum": 0 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u32", + "name": "vst4_lane_u8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint8_t * ptr", + "uint8x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -110869,26 +315655,35 @@ ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubl_u8", + "name": "vst4_p16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly16_t * ptr", + "poly16x4x4_t val" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ @@ -110898,26 +315693,35 @@ ], "instructions": [ [ - "USUBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f16", + "name": "vst4_p64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly64_t * ptr", + "poly64x1x4_t val" ], "return_type": { - "value": "float16x8_t" + "value": "void" }, 
"Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -110926,26 +315730,35 @@ ], "instructions": [ [ - "FSUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f32", + "name": "vst4_p8", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "poly8_t * ptr", + "poly8x8x4_t val" ], "return_type": { - "value": "float32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -110955,53 +315768,73 @@ ], "instructions": [ [ - "FSUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_f64", + "name": "vst4_s16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int16_t * ptr", + "int16x4x4_t val" ], "return_type": { - "value": "float64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": "Vt4.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "FSUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s16", + "name": "vst4_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32_t * ptr", + "int32x2x4_t val" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + 
"register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -111011,26 +315844,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s32", + "name": "vst4_s64", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int64_t * ptr", + "int64x1x4_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -111040,26 +315882,35 @@ ], "instructions": [ [ - "SUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s64", + "name": "vst4_s8", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int8_t * ptr", + "int8x8x4_t val" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -111069,26 +315920,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_s8", + "name": "vst4_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint16_t * ptr", + "uint16x4x4_t val" ], "return_type": { - "value": "int8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { + "register": 
"Vt4.4H" } }, "Architectures": [ @@ -111098,26 +315958,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u16", + "name": "vst4_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint32_t * ptr", + "uint32x2x4_t val" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { + "register": "Vt4.2S" } }, "Architectures": [ @@ -111127,26 +315996,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u32", + "name": "vst4_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint64_t * ptr", + "uint64x1x4_t val" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { + "register": "Vt4.1D" } }, "Architectures": [ @@ -111156,26 +316034,35 @@ ], "instructions": [ [ - "SUB" + "ST1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubq_u64", + "name": "vst4_u8", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "uint8_t * ptr", + "uint8x8x4_t val" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.2D" + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" } }, "Architectures": [ @@ -111185,26 +316072,35 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vsubq_u8", + "name": "vst4q_f16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "float16_t * ptr", + "float16x8x4_t val" ], "return_type": { - "value": "uint8x16_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.16B" + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -111214,53 +316110,73 @@ ], "instructions": [ [ - "SUB" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s16", + "name": "vst4q_f32", "arguments": [ - "int32x4_t a", - "int16x8_t b" + "float32_t * ptr", + "float32x4x4_t val" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.8H" + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s32", + "name": "vst4q_f64", "arguments": [ - "int64x2_t a", - "int32x4_t b" + "float64_t * ptr", + "float64x2x4_t val" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "ptr": { + "register": "Xn" }, - "b": { - "register": "Vm.4S" + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -111268,80 +316184,126 @@ ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_s8", + "name": "vst4q_lane_f16", "arguments": [ - "int16x8_t a", - "int8x16_t b" + "float16_t * ptr", + 
"float16x8x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SSUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_u16", + "name": "vst4q_lane_f32", "arguments": [ - "uint32x4_t a", - "uint16x8_t b" + "float32_t * ptr", + "float32x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.8H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_high_u32", + "name": "vst4q_lane_f64", "arguments": [ - "uint64x2_t a", - "uint32x4_t b" + "float64_t * ptr", + "float64x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ @@ -111349,111 +316311,165 @@ ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vsubw_high_u8", + "name": "vst4q_lane_p16", "arguments": [ - "uint16x8_t a", - "uint8x16_t b" + "poly16_t * ptr", + "poly16x8x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "USUBW2" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s16", + "name": "vst4q_lane_p64", "arguments": [ - "int32x4_t a", - "int16x4_t b" + "poly64_t * ptr", + "poly64x2x4_t val", + "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s32", + "name": "vst4q_lane_p8", "arguments": [ - "int64x2_t a", - "int32x2_t b" + "poly8_t * ptr", + "poly8x16x4_t val", + "const int lane" ], "return_type": { - "value": "int64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ - "v7", - "A32", "A64" 
], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_s8", + "name": "vst4q_lane_s16", "arguments": [ - "int16x8_t a", - "int8x8_t b" + "int16_t * ptr", + "int16x8x4_t val", + "const int lane" ], "return_type": { - "value": "int16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 7 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ @@ -111463,26 +316479,40 @@ ], "instructions": [ [ - "SSUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u16", + "name": "vst4q_lane_s32", "arguments": [ - "uint32x4_t a", - "uint16x4_t b" + "int32_t * ptr", + "int32x4x4_t val", + "const int lane" ], "return_type": { - "value": "uint32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "lane": { + "minimum": 0, + "maximum": 3 }, - "b": { - "register": "Vm.4H" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ @@ -111492,203 +316522,249 @@ ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u32", + "name": "vst4q_lane_s64", "arguments": [ - "uint64x2_t a", - "uint32x2_t b" + "int64_t * ptr", + "int64x2x4_t val", + "const int lane" ], "return_type": { - "value": "uint64x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "lane": { + "minimum": 0, + "maximum": 1 }, - "b": { - "register": "Vm.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + 
"val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsubw_u8", + "name": "vst4q_lane_s8", "arguments": [ - "uint16x8_t a", - "uint8x8_t b" + "int8_t * ptr", + "int8x16x4_t val", + "const int lane" ], "return_type": { - "value": "uint16x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "lane": { + "minimum": 0, + "maximum": 15 }, - "b": { - "register": "Vm.8B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "USUBW" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudot_lane_s32", + "name": "vst4q_lane_u16", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "uint8x8_t b", + "uint16_t * ptr", + "uint16x8x4_t val", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 1 + "maximum": 7 }, - "r": { - "register": "Vd.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudot_laneq_s32", + "name": "vst4q_lane_u32", "arguments": [ - "int32x2_t r", - "int8x8_t a", - "uint8x16_t b", + "uint32_t * ptr", + "uint32x4x4_t val", "const int lane" ], "return_type": { - "value": "int32x2_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, "maximum": 3 }, - "r": 
{ - "register": "Vd.2S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudotq_lane_s32", + "name": "vst4q_lane_u64", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "uint8x8_t b", + "uint64_t * ptr", + "uint64x2x4_t val", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, "maximum": 1 }, - "r": { - "register": "Vd.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vsudotq_laneq_s32", + "name": "vst4q_lane_u8", "arguments": [ - "int32x4_t r", - "int8x16_t a", - "uint8x16_t b", + "uint8_t * ptr", + "uint8x16x4_t val", "const int lane" ], "return_type": { - "value": "int32x4_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" - }, - "b": { - "register": "Vm.4B" - }, "lane": { "minimum": 0, - "maximum": 3 + "maximum": 15 }, - "r": { - "register": "Vd.4S" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" } }, "Architectures": [ @@ -111696,25 +316772,36 @@ ], "instructions": [ [ - "SUDOT" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_p8", + "name": "vst4q_p16", "arguments": [ - "poly8x8_t a", - "uint8x8_t idx" + "poly16_t * ptr", + "poly16x8x4_t 
val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" }, - "idx": {} + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111723,52 +316810,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_s8", + "name": "vst4q_p64", "arguments": [ - "int8x8_t a", - "int8x8_t idx" + "poly64_t * ptr", + "poly64x2x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" }, - "idx": {} + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl1_u8", + "name": "vst4q_p8", "arguments": [ - "uint8x8_t a", - "uint8x8_t idx" + "poly8_t * ptr", + "poly8x16x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" }, - "idx": {} + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ "v7", @@ -111777,25 +316884,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_p8", + "name": "vst4q_s16", "arguments": [ - "poly8x8x2_t a", - "uint8x8_t idx" + "int16_t * ptr", + "int16x8x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { 
+ "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111804,25 +316922,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_s8", + "name": "vst4q_s32", "arguments": [ - "int8x8x2_t a", - "int8x8_t idx" + "int32_t * ptr", + "int32x4x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "idx": {} + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { + "register": "Vt4.4S" + } }, "Architectures": [ "v7", @@ -111831,52 +316960,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl2_u8", + "name": "vst4q_s64", "arguments": [ - "uint8x8x2_t a", - "uint8x8_t idx" + "int64_t * ptr", + "int64x2x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.2D" }, - "idx": {} + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_p8", + "name": "vst4q_s8", "arguments": [ - "poly8x8x3_t a", - "uint8x8_t idx" + "int8_t * ptr", + "int8x16x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" }, - "idx": {} + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" + } }, 
"Architectures": [ "v7", @@ -111885,25 +317034,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_s8", + "name": "vst4q_u16", "arguments": [ - "int8x8x3_t a", - "int8x8_t idx" + "uint16_t * ptr", + "uint16x8x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" }, - "idx": {} + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { + "register": "Vt4.8H" + } }, "Architectures": [ "v7", @@ -111912,25 +317072,36 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl3_u8", + "name": "vst4q_u32", "arguments": [ - "uint8x8x3_t a", - "uint8x8_t idx" + "uint32_t * ptr", + "uint32x4x4_t val" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" }, - "idx": {} + "val.val[3]": { + "register": "Vt4.4S" + } }, "Architectures": [ "v7", @@ -111939,52 +317110,72 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_p8", + "name": "vst4q_u64", "arguments": [ - "poly8x8x4_t a", - "uint8x8_t idx" + "uint64_t * ptr", + "uint64x2x4_t val" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "idx": {} + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { + "register": "Vt4.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_s8", + "name": "vst4q_u8", "arguments": 
[ - "int8x8x4_t a", - "int8x8_t idx" + "uint8_t * ptr", + "uint8x16x4_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "idx": {} + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" + } }, "Architectures": [ "v7", @@ -111993,333 +317184,339 @@ ], "instructions": [ [ - "TBL" + "ST4" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbl4_u8", + "name": "vstl1_lane_f64", "arguments": [ - "uint8x8x4_t a", - "uint8x8_t idx" + "float64_t * ptr", + "float64x1_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 }, - "idx": {} + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBL" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_p8", + "name": "vstl1_lane_p64", "arguments": [ - "poly8x8_t a", - "poly8x8_t b", - "uint8x8_t idx" + "poly64_t * ptr", + "poly64x1_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_s8", + "name": "vstl1_lane_s64", "arguments": [ - "int8x8_t a", - "int8x8_t b", - "int8x8_t idx" + "int64_t * ptr", + "int64x1_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 
0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx1_u8", + "name": "vstl1_lane_u64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "uint8x8_t idx" + "uint64_t * ptr", + "uint64x1_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.1D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_p8", + "name": "vstl1q_lane_f64", "arguments": [ - "poly8x8_t a", - "poly8x8x2_t b", - "uint8x8_t idx" + "float64_t * ptr", + "float64x2_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_s8", + "name": "vstl1q_lane_p64", "arguments": [ - "int8x8_t a", - "int8x8x2_t b", - "int8x8_t idx" + "poly64_t * ptr", + "poly64x2_t val", + "const int lane" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx2_u8", + "name": "vstl1q_lane_s64", 
"arguments": [ - "uint8x8_t a", - "uint8x8x2_t b", - "uint8x8_t idx" + "int64_t * ptr", + "int64x2_t val", + "const int lane" ], "return_type": { - "value": "uint8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_p8", + "name": "vstl1q_lane_u64", "arguments": [ - "poly8x8_t a", - "poly8x8x3_t b", - "uint8x8_t idx" + "uint64_t * ptr", + "uint64x2_t val", + "const int lane" ], "return_type": { - "value": "poly8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "lane": { + "minimum": 0, + "maximum": 1 }, - "idx": {} + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.2D" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_s8", + "name": "vstrq_p128", "arguments": [ - "int8x8_t a", - "int8x8x3_t b", - "int8x8_t idx" + "poly128_t * ptr", + "poly128_t val" ], "return_type": { - "value": "int8x8_t" + "value": "void" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "ptr": { + "register": "Xn" }, - "idx": {} + "val": { + "register": "Qt" + } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "STR" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx3_u8", + "name": "vsub_f16", "arguments": [ - "uint8x8_t a", - "uint8x8x3_t b", - "uint8x8_t idx" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Vn.4H" }, - "idx": {} + "b": { + "register": "Vm.4H" + } }, "Architectures": [ - "v7", 
"A32", "A64" ], "instructions": [ [ - "MOVI", - "CMHS", - "TBL", - "BIF" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_p8", + "name": "vsub_f32", "arguments": [ - "poly8x8_t a", - "poly8x8x4_t b", - "uint8x8_t idx" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Vn.2S" }, - "idx": {} + "b": { + "register": "Vm.2S" + } }, "Architectures": [ "v7", @@ -112328,56 +317525,54 @@ ], "instructions": [ [ - "TBX" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_s8", + "name": "vsub_f64", "arguments": [ - "int8x8_t a", - "int8x8x4_t b", - "int8x8_t idx" + "float64x1_t a", + "float64x1_t b" ], "return_type": { - "value": "int8x8_t" + "value": "float64x1_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Dn" }, - "idx": {} + "b": { + "register": "Dm" + } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TBX" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtbx4_u8", + "name": "vsub_s16", "arguments": [ - "uint8x8_t a", - "uint8x8x4_t b", - "uint8x8_t idx" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": { - "register": "Vn.16B" + "a": { + "register": "Vn.4H" }, - "idx": {} + "b": { + "register": "Vm.4H" + } }, "Architectures": [ "v7", @@ -112386,181 +317581,193 @@ ], "instructions": [ [ - "TBX" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_f16", + "name": "vsub_s32", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "b": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] 
}, { "SIMD_ISA": "Neon", - "name": "vtrn1_f32", + "name": "vsub_s64", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_p16", + "name": "vsub_s8", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8B" }, "b": { - "register": "Vm.4H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_p8", + "name": "vsub_u16", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4H" }, "b": { - "register": "Vm.8B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s16", + "name": "vsub_u32", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int16x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2S" }, "b": { - "register": "Vm.4H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s32", + "name": "vsub_u64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "int32x2_t" + "value": "uint64x1_t" }, 
"Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_s8", + "name": "vsub_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { @@ -112571,30 +317778,32 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u16", + "name": "vsubd_s64", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "int64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ @@ -112602,26 +317811,26 @@ ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u32", + "name": "vsubd_u64", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" } }, "Architectures": [ @@ -112629,46 +317838,48 @@ ], "instructions": [ [ - "TRN1" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1_u8", + "name": "vsubh_f16", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "float16_t a", + "float16_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "float16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Hn" }, "b": { - "register": "Vm.8B" + "register": "Hm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "TRN1" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f16", + "name": "vsubhn_high_s16", "arguments": [ - 
"float16x8_t a", - "float16x8_t b" + "int8x8_t r", + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { @@ -112676,6 +317887,9 @@ }, "b": { "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -112683,19 +317897,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f32", + "name": "vsubhn_high_s32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int16x4_t r", + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -112703,6 +317918,9 @@ }, "b": { "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -112710,19 +317928,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_f64", + "name": "vsubhn_high_s64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "int32x2_t r", + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -112730,6 +317949,9 @@ }, "b": { "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -112737,19 +317959,20 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p16", + "name": "vsubhn_high_u16", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "uint8x8_t r", + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { @@ -112757,6 +317980,9 @@ }, "b": { "register": "Vm.8H" + }, + "r": { + "register": "Vd.8B" } }, "Architectures": [ @@ -112764,26 +317990,30 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p64", + "name": "vsubhn_high_u32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "uint16x4_t r", + "uint32x4_t a", + "uint32x4_t b" ], 
"return_type": { - "value": "poly64x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" + }, + "r": { + "register": "Vd.4H" } }, "Architectures": [ @@ -112791,26 +318021,30 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_p8", + "name": "vsubhn_high_u64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint32x2_t r", + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ @@ -112818,19 +318052,19 @@ ], "instructions": [ [ - "TRN1" + "SUBHN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s16", + "name": "vsubhn_s16", "arguments": [ "int16x8_t a", "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { @@ -112841,23 +318075,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s32", + "name": "vsubhn_s32", "arguments": [ "int32x4_t a", "int32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { @@ -112868,23 +318104,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s64", + "name": "vsubhn_s64", "arguments": [ "int64x2_t a", "int64x2_t b" ], "return_type": { - "value": "int64x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -112895,111 +318133,119 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_s8", + "name": "vsubhn_u16", "arguments": [ - "int8x16_t a", - "int8x16_t b" 
+ "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.16B" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u16", + "name": "vsubhn_u32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, "b": { - "register": "Vm.8H" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u32", + "name": "vsubhn_u64", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2D" }, "b": { - "register": "Vm.4S" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN1" + "SUBHN" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u64", + "name": "vsubl_high_s16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8H" }, "b": { - "register": "Vm.2D" + "register": "Vm.8H" } }, "Architectures": [ @@ -113007,26 +318253,26 @@ ], "instructions": [ [ - "TRN1" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn1q_u8", + "name": "vsubl_high_s32", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": 
"Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -113034,26 +318280,26 @@ ], "instructions": [ [ - "TRN1" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_f16", + "name": "vsubl_high_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float16x4_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.16B" }, "b": { - "register": "Vm.4H" + "register": "Vm.16B" } }, "Architectures": [ @@ -113061,26 +318307,26 @@ ], "instructions": [ [ - "TRN2" + "SSUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_f32", + "name": "vsubl_high_u16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float32x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.8H" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ @@ -113088,26 +318334,26 @@ ], "instructions": [ [ - "TRN2" + "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_p16", + "name": "vsubl_high_u32", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ @@ -113115,26 +318361,26 @@ ], "instructions": [ [ - "TRN2" + "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_p8", + "name": "vsubl_high_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ @@ -113142,19 +318388,19 @@ ], "instructions": [ [ - "TRN2" 
+ "USUBL2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s16", + "name": "vsubl_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -113165,23 +318411,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s32", + "name": "vsubl_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { @@ -113192,23 +318440,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_s8", + "name": "vsubl_s8", "arguments": [ "int8x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -113219,23 +318469,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SSUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_u16", + "name": "vsubl_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -113246,23 +318498,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_u32", + "name": "vsubl_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { @@ -113273,23 +318527,25 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2_u8", + "name": "vsubl_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -113300,17 +318556,19 @@ } }, "Architectures": [ + "v7", + "A32", 
"A64" ], "instructions": [ [ - "TRN2" + "USUBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f16", + "name": "vsubq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -113327,17 +318585,18 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f32", + "name": "vsubq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -113354,17 +318613,19 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_f64", + "name": "vsubq_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -113385,19 +318646,19 @@ ], "instructions": [ [ - "TRN2" + "FSUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p16", + "name": "vsubq_s16", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -113408,219 +318669,235 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p64", + "name": "vsubq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_p8", + "name": "vsubq_s64", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vtrn2q_s16", + "name": "vsubq_s8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s32", + "name": "vsubq_u16", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8H" }, "b": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s64", + "name": "vsubq_u32", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_s8", + "name": "vsubq_u64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm.16B" + "register": "Vm.2D" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u16", + "name": "vsubq_u8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": 
"uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "TRN2" + "SUB" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u32", + "name": "vsubw_high_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x4_t a", + "int16x8_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { "register": "Vn.4S" }, "b": { - "register": "Vm.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -113628,26 +318905,26 @@ ], "instructions": [ [ - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u64", + "name": "vsubw_high_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int64x2_t a", + "int32x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { "register": "Vn.2D" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ @@ -113655,23 +318932,23 @@ ], "instructions": [ [ - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn2q_u8", + "name": "vsubw_high_s8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int16x8_t a", + "int8x16_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { "register": "Vm.16B" @@ -113682,143 +318959,104 @@ ], "instructions": [ [ - "TRN2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vtrn_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "v7", - "A32", - "A64" - ], - "instructions": [ - [ - "TRN1", - "TRN2" + "SSUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_f32", + "name": "vsubw_high_u16", "arguments": [ - 
"float32x2_t a", - "float32x2_t b" + "uint32x4_t a", + "uint16x8_t b" ], "return_type": { - "value": "float32x2x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.8H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_p16", + "name": "vsubw_high_u32", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint64x2_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x4x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.2D" }, "b": { - "register": "Vm.4H" + "register": "Vm.4S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_p8", + "name": "vsubw_high_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint16x8_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.8B" + "register": "Vm.16B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_s16", + "name": "vsubw_s16", "arguments": [ - "int16x4_t a", + "int32x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { "register": "Vm.4H" @@ -113831,24 +319069,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_s32", + "name": "vsubw_s32", "arguments": [ - "int32x2_t a", + "int64x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, "b": 
{ "register": "Vm.2S" @@ -113861,24 +319098,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_s8", + "name": "vsubw_s8", "arguments": [ - "int8x8_t a", + "int16x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { "register": "Vm.8B" @@ -113891,24 +319127,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "SSUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u16", + "name": "vsubw_u16", "arguments": [ - "uint16x4_t a", + "uint32x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.4S" }, "b": { "register": "Vm.4H" @@ -113921,24 +319156,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u32", + "name": "vsubw_u32", "arguments": [ - "uint32x2_t a", + "uint64x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.2D" }, "b": { "register": "Vm.2S" @@ -113951,24 +319185,23 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrn_u8", + "name": "vsubw_u8", "arguments": [ - "uint8x8_t a", + "uint16x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { "register": "Vm.8B" @@ -113981,147 +319214,174 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "USUBW" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_f16", + "name": "vsudot_lane_s32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "int32x2_t r", + "int8x8_t a", + "uint8x8_t b", + "const int lane" ], "return_type": { - "value": "float16x8x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": 
{ - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_f32", + "name": "vsudot_laneq_s32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int32x2_t r", + "int8x8_t a", + "uint8x16_t b", + "const int lane" ], "return_type": { - "value": "float32x4x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_p16", + "name": "vsudotq_lane_s32", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "int32x4_t r", + "int8x16_t a", + "uint8x8_t b", + "const int lane" ], "return_type": { - "value": "poly16x8x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_p8", + "name": "vsudotq_laneq_s32", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int32x4_t r", + "int8x16_t a", + "uint8x16_t b", + "const int lane" ], "return_type": { - "value": "poly8x16x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], 
"instructions": [ [ - "TRN1", - "TRN2" + "SUDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s16", + "name": "vtbl1_p8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "poly8x8_t a", + "uint8x8_t idx" ], "return_type": { - "value": "int16x8x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "Zeros(64):a": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.8H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114131,27 +319391,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s32", + "name": "vtbl1_s8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int8x8_t a", + "int8x8_t idx" ], "return_type": { - "value": "int32x4x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "Zeros(64):a": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.4S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114161,27 +319420,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_s8", + "name": "vtbl1_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "uint8x8_t idx" ], "return_type": { - "value": "int8x16x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114191,27 +319449,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u16", + "name": "vtbl2_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x8x2_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x8x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.8H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114221,27 +319478,26 @@ ], "instructions": [ [ - 
"TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u32", + "name": "vtbl2_s8", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int8x8x2_t a", + "int8x8_t idx" ], "return_type": { - "value": "uint32x4x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.4S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114251,27 +319507,26 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtrnq_u8", + "name": "vtbl2_u8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "uint8x8x2_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x16x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114281,54 +319536,60 @@ ], "instructions": [ [ - "TRN1", - "TRN2" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_p64", + "name": "vtbl3_p8", "arguments": [ - "poly64x1_t a", - "poly64x1_t b" + "poly8x8x3_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x1_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" }, - "b": { - "register": "Dm" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_p8", + "name": "vtbl3_s8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "int8x8x3_t a", + "int8x8_t idx" ], "return_type": { - "value": "uint8x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" }, - "b": { + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114339,26 
+319600,29 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s16", + "name": "vtbl3_u8", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "uint8x8x3_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x4_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" }, - "b": { - "register": "Vm.4H" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114368,26 +319632,29 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s32", + "name": "vtbl4_p8", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "poly8x8x4_t a", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Vm.2S" + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114397,52 +319664,60 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s64", + "name": "vtbl4_s8", "arguments": [ - "int64x1_t a", - "int64x1_t b" + "int8x8x4_t a", + "int8x8_t idx" ], "return_type": { - "value": "uint64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Dn" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - "b": { - "register": "Dm" + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_s8", + "name": "vtbl4_u8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "uint8x8x4_t a", + "uint8x8_t idx" ], "return_type": { "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8B" + "a.val[1]:a.val[0]": { + "register": "Vn.16B" }, - 
"b": { + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114453,26 +319728,30 @@ ], "instructions": [ [ - "CMTST" + "TBL" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u16", + "name": "vtbx1_p8", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "poly8x8_t a", + "poly8x8_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Vn.4H" + "register": "Vd.8B" }, - "b": { - "register": "Vm.4H" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114482,26 +319761,33 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u32", + "name": "vtbx1_s8", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int8x8_t a", + "int8x8_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint32x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Vn.2S" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2S" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114511,52 +319797,68 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u64", + "name": "vtbx1_u8", "arguments": [ - "uint64x1_t a", - "uint64x1_t b" + "uint8x8_t a", + "uint8x8_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x1_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b": { + "register": "Vn.16B" + }, "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtst_u8", + "name": "vtbx2_p8", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "poly8x8_t a", + "poly8x8x2_t b", + "uint8x8_t 
idx" ], "return_type": { - "value": "uint8x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vd.8B" }, - "b": { + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { "register": "Vm.8B" } }, @@ -114567,108 +319869,138 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstd_s64", + "name": "vtbx2_s8", "arguments": [ - "int64_t a", - "int64_t b" + "int8x8_t a", + "int8x8x2_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint64_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstd_u64", + "name": "vtbx2_u8", "arguments": [ - "uint64_t a", - "uint64_t b" + "uint8x8_t a", + "uint8x8x2_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dn" + "register": "Vd.8B" }, - "b": { - "register": "Dm" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_p64", + "name": "vtbx3_p8", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "poly8x8_t a", + "poly8x8x3_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint64x2_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { - "register": "Vn.2D" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2D" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", "A32", "A64" ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": 
"Neon", - "name": "vtstq_p8", + "name": "vtbx3_s8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int8x8_t a", + "int8x8x3_t b", + "int8x8_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114678,26 +320010,36 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s16", + "name": "vtbx3_u8", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint8x8_t a", + "uint8x8x3_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, "a": { - "register": "Vn.8H" + "register": "Vd.8B" }, - "b": { - "register": "Vm.8H" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "idx": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114707,26 +320049,36 @@ ], "instructions": [ [ - "CMTST" + "MOVI", + "CMHS", + "TBL", + "BIF" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s32", + "name": "vtbx4_p8", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "poly8x8_t a", + "poly8x8x4_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint32x4_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vd.8B" }, - "b": { - "register": "Vm.4S" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114736,53 +320088,69 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s64", + "name": "vtbx4_s8", "arguments": [ - "int64x2_t a", - "int64x2_t b" + "int8x8_t a", + "int8x8x4_t b", + "int8x8_t idx" ], "return_type": { - "value": 
"uint64x2_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vd.8B" }, - "b": { - "register": "Vm.2D" + "b.val[1]:b.val[0]": { + "register": "Vn.16B" + }, + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_s8", + "name": "vtbx4_u8", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "uint8x8_t a", + "uint8x8x4_t b", + "uint8x8_t idx" ], "return_type": { - "value": "uint8x16_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "b": { - "register": "Vm.16B" + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" } }, "Architectures": [ @@ -114792,84 +320160,80 @@ ], "instructions": [ [ - "CMTST" + "TBX" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u16", + "name": "vtrn1_f16", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm.8H" + "register": "Vm.4H" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u32", + "name": "vtrn1_f32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.2S" }, "b": { - "register": "Vm.4S" + "register": "Vm.2S" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u64", + "name": "vtrn1_p16", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "poly16x4_t a", + 
"poly16x4_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4H" }, "b": { - "register": "Vm.2D" + "register": "Vm.4H" } }, "Architectures": [ @@ -114877,55 +320241,53 @@ ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vtstq_u8", + "name": "vtrn1_p8", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "CMTST" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s16", + "name": "vtrn1_s16", "arguments": [ "int16x4_t a", - "uint16x4_t b" + "int16x4_t b" ], "return_type": { "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4H" + "register": "Vn.4H" }, "b": { - "register": "Vn.4H" + "register": "Vm.4H" } }, "Architectures": [ @@ -114933,26 +320295,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s32", + "name": "vtrn1_s32", "arguments": [ "int32x2_t a", - "uint32x2_t b" + "int32x2_t b" ], "return_type": { "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2S" + "register": "Vn.2S" }, "b": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -114960,26 +320322,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s64", + "name": "vtrn1_s8", "arguments": [ - "int64x1_t a", - "uint64x1_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int64x1_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, "b": { - "register": "Dn" + "register": "Vm.8B" } }, "Architectures": [ @@ -114987,26 +320349,26 @@ ], 
"instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadd_s8", + "name": "vtrn1_u16", "arguments": [ - "int8x8_t a", - "uint8x8_t b" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int8x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8B" + "register": "Vn.4H" }, "b": { - "register": "Vn.8B" + "register": "Vm.4H" } }, "Architectures": [ @@ -115014,26 +320376,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddb_s8", + "name": "vtrn1_u32", "arguments": [ - "int8_t a", - "uint8_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Bd" + "register": "Vn.2S" }, "b": { - "register": "Bn" + "register": "Vm.2S" } }, "Architectures": [ @@ -115041,26 +320403,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddd_s64", + "name": "vtrn1_u8", "arguments": [ - "int64_t a", - "uint64_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int64_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Dd" + "register": "Vn.8B" }, "b": { - "register": "Dn" + "register": "Vm.8B" } }, "Architectures": [ @@ -115068,26 +320430,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddh_s16", + "name": "vtrn1q_f16", "arguments": [ - "int16_t a", - "uint16_t b" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "int16_t" + "value": "float16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Hd" + "register": "Vn.8H" }, "b": { - "register": "Hn" + "register": "Vm.8H" } }, "Architectures": [ @@ -115095,26 +320457,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s16", + "name": "vtrn1q_f32", "arguments": [ - "int16x8_t a", - "uint16x8_t b" + "float32x4_t a", + "float32x4_t b" ], 
"return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.8H" + "register": "Vn.4S" }, "b": { - "register": "Vn.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -115122,26 +320484,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s32", + "name": "vtrn1q_f64", "arguments": [ - "int32x4_t a", - "uint32x4_t b" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "int32x4_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.4S" + "register": "Vn.2D" }, "b": { - "register": "Vn.4S" + "register": "Vm.2D" } }, "Architectures": [ @@ -115149,26 +320511,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s64", + "name": "vtrn1q_p16", "arguments": [ - "int64x2_t a", - "uint64x2_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "int64x2_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.2D" + "register": "Vn.8H" }, "b": { - "register": "Vn.2D" + "register": "Vm.8H" } }, "Architectures": [ @@ -115176,26 +320538,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqaddq_s8", + "name": "vtrn1q_p64", "arguments": [ - "int8x16_t a", - "uint8x16_t b" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vd.16B" + "register": "Vn.2D" }, "b": { - "register": "Vn.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -115203,26 +320565,26 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuqadds_s32", + "name": "vtrn1q_p8", "arguments": [ - "int32_t a", - "uint32_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int32_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Sd" + "register": "Vn.16B" }, "b": { - 
"register": "Sn" + "register": "Vm.16B" } }, "Architectures": [ @@ -115230,72 +320592,53 @@ ], "instructions": [ [ - "SUQADD" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_lane_s32", + "name": "vtrn1q_s16", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x8_t b", - "const int lane" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.8H" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.2S" + "register": "Vm.8H" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_laneq_s32", + "name": "vtrn1q_s32", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x16_t b", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.4S" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -115303,104 +320646,80 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdot_s32", + "name": "vtrn1q_s64", "arguments": [ - "int32x2_t r", - "uint8x8_t a", - "int8x8_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" - }, - "r": { - "register": "Vd.2S" + "register": "Vm.2D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_lane_s32", + "name": "vtrn1q_s8", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x8_t b", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - 
"value": "int32x4_t" + "value": "int8x16_t" }, "Arguments_Preparation": { "a": { "register": "Vn.16B" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.4S" + "register": "Vm.16B" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_laneq_s32", + "name": "vtrn1q_u16", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x16_t b", - "const int lane" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm.4B" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.4S" + "register": "Vm.8H" } }, "Architectures": [ @@ -115408,30 +320727,53 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusdotq_s32", + "name": "vtrn1q_u32", "arguments": [ - "int32x4_t r", - "uint8x16_t a", - "int8x16_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm.16B" + "register": "Vm.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtrn1q_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" }, - "r": { - "register": "Vd.4S" + "b": { + "register": "Vm.2D" } }, "Architectures": [ @@ -115439,20 +320781,19 @@ ], "instructions": [ [ - "USDOT" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vusmmlaq_s32", + "name": "vtrn1q_u8", "arguments": [ - "int32x4_t r", "uint8x16_t a", - "int8x16_t b" + "uint8x16_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint8x16_t" }, 
"Arguments_Preparation": { "a": { @@ -115460,24 +320801,20 @@ }, "b": { "register": "Vm.16B" - }, - "r": { - "register": "Vd.4S" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "USMMLA" + "TRN1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_f16", + "name": "vtrn2_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -115498,13 +320835,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_f32", + "name": "vtrn2_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -115525,13 +320862,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_p16", + "name": "vtrn2_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -115552,13 +320889,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_p8", + "name": "vtrn2_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -115579,13 +320916,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_s16", + "name": "vtrn2_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -115606,13 +320943,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_s32", + "name": "vtrn2_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -115633,13 +320970,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_s8", + "name": "vtrn2_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -115660,13 +320997,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u16", + "name": "vtrn2_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -115687,13 +321024,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u32", + "name": "vtrn2_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -115714,13 +321051,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1_u8", + "name": "vtrn2_u8", "arguments": [ 
"uint8x8_t a", "uint8x8_t b" @@ -115741,13 +321078,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f16", + "name": "vtrn2q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -115768,13 +321105,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f32", + "name": "vtrn2q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -115795,13 +321132,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_f64", + "name": "vtrn2q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -115822,13 +321159,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p16", + "name": "vtrn2q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -115849,13 +321186,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p64", + "name": "vtrn2q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -115876,13 +321213,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_p8", + "name": "vtrn2q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -115903,13 +321240,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s16", + "name": "vtrn2q_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -115930,13 +321267,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s32", + "name": "vtrn2q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -115957,13 +321294,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s64", + "name": "vtrn2q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -115984,13 +321321,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_s8", + "name": "vtrn2q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -116011,13 +321348,13 @@ ], "instructions": [ [ - 
"UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u16", + "name": "vtrn2q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -116038,13 +321375,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u32", + "name": "vtrn2q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -116065,13 +321402,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u64", + "name": "vtrn2q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -116092,13 +321429,13 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp1q_u8", + "name": "vtrn2q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -116119,19 +321456,19 @@ ], "instructions": [ [ - "UZP1" + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_f16", + "name": "vtrn_f16", "arguments": [ "float16x4_t a", "float16x4_t b" ], "return_type": { - "value": "float16x4_t" + "value": "float16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116142,23 +321479,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_f32", + "name": "vtrn_f32", "arguments": [ "float32x2_t a", "float32x2_t b" ], "return_type": { - "value": "float32x2_t" + "value": "float32x2x2_t" }, "Arguments_Preparation": { "a": { @@ -116169,23 +321509,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_p16", + "name": "vtrn_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" ], "return_type": { - "value": "poly16x4_t" + "value": "poly16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116196,23 +321539,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_p8", + "name": "vtrn_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" ], "return_type": { - "value": 
"poly8x8_t" + "value": "poly8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116223,23 +321569,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s16", + "name": "vtrn_s16", "arguments": [ "int16x4_t a", "int16x4_t b" ], "return_type": { - "value": "int16x4_t" + "value": "int16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116250,23 +321599,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s32", + "name": "vtrn_s32", "arguments": [ "int32x2_t a", "int32x2_t b" ], "return_type": { - "value": "int32x2_t" + "value": "int32x2x2_t" }, "Arguments_Preparation": { "a": { @@ -116277,23 +321629,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_s8", + "name": "vtrn_s8", "arguments": [ "int8x8_t a", "int8x8_t b" ], "return_type": { - "value": "int8x8_t" + "value": "int8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116304,23 +321659,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u16", + "name": "vtrn_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" ], "return_type": { - "value": "uint16x4_t" + "value": "uint16x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116331,23 +321689,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u32", + "name": "vtrn_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" ], "return_type": { - "value": "uint32x2_t" + "value": "uint32x2x2_t" }, "Arguments_Preparation": { "a": { @@ -116358,23 +321719,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2_u8", + 
"name": "vtrn_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" ], "return_type": { - "value": "uint8x8_t" + "value": "uint8x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116385,23 +321749,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f16", + "name": "vtrnq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "float16x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116412,23 +321779,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f32", + "name": "vtrnq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" ], "return_type": { - "value": "float32x4_t" + "value": "float32x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116439,50 +321809,86 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_f64", + "name": "vtrnq_p16", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "float64x2_t" + "value": "poly16x8x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.8H" }, "b": { - "register": "Vm.2D" + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p16", + "name": "vtrnq_p8", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "poly8x16x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "TRN1", + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtrnq_s16", + "arguments": [ + 
"int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116493,50 +321899,56 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p64", + "name": "vtrnq_s32", "arguments": [ - "poly64x2_t a", - "poly64x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "poly64x2_t" + "value": "int32x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.4S" }, "b": { - "register": "Vm.2D" + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_p8", + "name": "vtrnq_s8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int8x16x2_t" }, "Arguments_Preparation": { "a": { @@ -116547,23 +321959,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s16", + "name": "vtrnq_u16", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x8x2_t" }, "Arguments_Preparation": { "a": { @@ -116574,23 +321989,26 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s32", + "name": "vtrnq_u32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "int32x4_t" + "value": "uint32x4x2_t" }, "Arguments_Preparation": { "a": { @@ -116601,165 +322019,178 @@ } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s64", + "name": "vtrnq_u8", "arguments": [ - "int64x2_t a", - "int64x2_t b" 
+ "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "int64x2_t" + "value": "uint8x16x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, "b": { - "register": "Vm.2D" + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "TRN1", + "TRN2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_s8", + "name": "vtst_p64", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "poly64x1_t a", + "poly64x1_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u16", + "name": "vtst_p8", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u32", + "name": "vtst_s16", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint32x4_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.4H" }, "b": { - "register": "Vm.4S" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u64", + "name": "vtst_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" }, "b": { - 
"register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp2q_u8", + "name": "vtst_s64", "arguments": [ - "uint8x16_t a", - "uint8x16_t b" + "int64x1_t a", + "int64x1_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Dn" }, "b": { - "register": "Vm.16B" + "register": "Dm" } }, "Architectures": [ @@ -116767,19 +322198,48 @@ ], "instructions": [ [ - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_f16", + "name": "vtst_s8", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16x4x2_t" + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "v7", + "A32", + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtst_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { @@ -116796,20 +322256,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_f32", + "name": "vtst_u32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "float32x2x2_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { @@ -116826,50 +322285,46 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_p16", + "name": "vtst_u64", "arguments": [ - "poly16x4_t a", - "poly16x4_t b" + "uint64x1_t a", + "uint64x1_t b" ], "return_type": { - "value": "poly16x4x2_t" + "value": "uint64x1_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, 
"Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_p8", + "name": "vtst_u8", "arguments": [ - "poly8x8_t a", - "poly8x8_t b" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "poly8x8x2_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { @@ -116886,87 +322341,108 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s16", + "name": "vtstd_s64", "arguments": [ - "int16x4_t a", - "int16x4_t b" + "int64_t a", + "int64_t b" ], "return_type": { - "value": "int16x4x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Dn" }, "b": { - "register": "Vm.4H" + "register": "Dm" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s32", + "name": "vtstd_u64", "arguments": [ - "int32x2_t a", - "int32x2_t b" + "uint64_t a", + "uint64_t b" ], "return_type": { - "value": "int32x2x2_t" + "value": "uint64_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Dn" }, "b": { - "register": "Vm.2S" + "register": "Dm" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtstq_p64", + "arguments": [ + "poly64x2_t a", + "poly64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_s8", + "name": "vtstq_p8", "arguments": [ - "int8x8_t a", - "int8x8_t b" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int8x8x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.16B" }, "b": { - "register": "Vm.8B" 
+ "register": "Vm.16B" } }, "Architectures": [ @@ -116976,27 +322452,26 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u16", + "name": "vtstq_s16", "arguments": [ - "uint16x4_t a", - "uint16x4_t b" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16x4x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, "b": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -117006,27 +322481,26 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u32", + "name": "vtstq_s32", "arguments": [ - "uint32x2_t a", - "uint32x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint32x2x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -117036,27 +322510,53 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzp_u8", + "name": "vtstq_s64", "arguments": [ - "uint8x8_t a", - "uint8x8_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8x8x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8B" + "register": "Vn.2D" }, "b": { - "register": "Vm.8B" + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vtstq_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" } }, "Architectures": [ @@ -117066,20 +322566,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_f16", + "name": "vtstq_u16", "arguments": [ - "float16x8_t a", - "float16x8_t b" + 
"uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float16x8x2_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { @@ -117096,20 +322595,19 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_f32", + "name": "vtstq_u32", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "float32x4x2_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -117126,50 +322624,46 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_p16", + "name": "vtstq_u64", "arguments": [ - "poly16x8_t a", - "poly16x8_t b" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly16x8x2_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm.8H" + "register": "Vm.2D" } }, "Architectures": [ - "v7", - "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_p8", + "name": "vtstq_u8", "arguments": [ - "poly8x16_t a", - "poly8x16_t b" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly8x16x2_t" + "value": "uint8x16_t" }, "Arguments_Preparation": { "a": { @@ -117186,170 +322680,524 @@ ], "instructions": [ [ - "UZP1", - "UZP2" + "CMTST" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s16", + "arguments": [ + "int16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s32", + "arguments": [ + "int32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "int32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2S" + }, + "b": { + "register": "Vn.2S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s64", + "arguments": [ + "int64x1_t a", + "uint64x1_t b" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadd_s8", + "arguments": [ + "int8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8B" + }, + "b": { + "register": "Vn.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddb_s8", + "arguments": [ + "int8_t a", + "uint8_t b" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Bd" + }, + "b": { + "register": "Bn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddd_s64", + "arguments": [ + "int64_t a", + "uint64_t b" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dd" + }, + "b": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddh_s16", + "arguments": [ + "int16_t a", + "uint16_t b" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Hd" + }, + "b": { + "register": "Hn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s16", + "arguments": [ + "int16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.8H" + }, + "b": { + "register": "Vn.8H" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s32", + "arguments": [ + "int32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "int32x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.4S" + }, + "b": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s64", + "arguments": [ + "int64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.2D" + }, + "b": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqaddq_s8", + "arguments": [ + "int8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vd.16B" + }, + "b": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vuqadds_s32", + "arguments": [ + "int32_t a", + "uint32_t b" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sd" + }, + "b": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s16", + "name": "vusdot_lane_s32", "arguments": [ - "int16x8_t a", - "int16x8_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x8_t b", + "const int lane" ], "return_type": { - "value": "int16x8x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + 
"USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s32", + "name": "vusdot_laneq_s32", "arguments": [ - "int32x4_t a", - "int32x4_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x16_t b", + "const int lane" ], "return_type": { - "value": "int32x4x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vn.8B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_s8", + "name": "vusdot_s32", "arguments": [ - "int8x16_t a", - "int8x16_t b" + "int32x2_t r", + "uint8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "int8x16x2_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm.16B" + "register": "Vm.8B" + }, + "r": { + "register": "Vd.2S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u16", + "name": "vusdotq_lane_s32", "arguments": [ - "uint16x8_t a", - "uint16x8_t b" + "int32x4_t r", + "uint8x16_t a", + "int8x8_t b", + "const int lane" ], "return_type": { - "value": "uint16x8x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm.8H" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 1 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u32", + "name": "vusdotq_laneq_s32", "arguments": [ - "uint32x4_t a", - "uint32x4_t b" + "int32x4_t r", + "uint8x16_t a", + "int8x16_t b", + "const int lane" ], "return_type": { - "value": "uint32x4x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { 
- "register": "Vn.4S" + "register": "Vn.16B" }, "b": { - "register": "Vm.4S" + "register": "Vm.4B" + }, + "lane": { + "minimum": 0, + "maximum": 3 + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vuzpq_u8", + "name": "vusdotq_s32", "arguments": [ + "int32x4_t r", "uint8x16_t a", - "uint8x16_t b" + "int8x16_t b" ], "return_type": { - "value": "uint8x16x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { @@ -117357,53 +323205,56 @@ }, "b": { "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ - "v7", "A32", "A64" ], "instructions": [ [ - "UZP1", - "UZP2" + "USDOT" ] ] }, { "SIMD_ISA": "Neon", - "name": "vxarq_u64", + "name": "vusmmlaq_s32", "arguments": [ - "uint64x2_t a", - "uint64x2_t b", - "const int imm6" + "int32x4_t r", + "uint8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "uint64x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.16B" }, - "b": {}, - "imm6": { - "minimum": 0, - "maximum": 63 + "b": { + "register": "Vm.16B" + }, + "r": { + "register": "Vd.4S" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "XAR" + "USMMLA" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_f16", + "name": "vuzp1_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -117424,13 +323275,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_f32", + "name": "vuzp1_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -117451,13 +323302,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_p16", + "name": "vuzp1_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -117478,13 +323329,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_p8", + "name": "vuzp1_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -117505,13 
+323356,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s16", + "name": "vuzp1_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -117532,13 +323383,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s32", + "name": "vuzp1_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -117559,13 +323410,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_s8", + "name": "vuzp1_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -117586,13 +323437,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u16", + "name": "vuzp1_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -117613,13 +323464,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u32", + "name": "vuzp1_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -117640,13 +323491,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1_u8", + "name": "vuzp1_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -117667,13 +323518,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f16", + "name": "vuzp1q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -117694,13 +323545,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f32", + "name": "vuzp1q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -117721,13 +323572,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_f64", + "name": "vuzp1q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -117748,13 +323599,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_p16", + "name": "vuzp1q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -117775,13 +323626,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vzip1q_p64", + "name": "vuzp1q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -117802,13 +323653,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_p8", + "name": "vuzp1q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -117829,13 +323680,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s16", + "name": "vuzp1q_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -117856,13 +323707,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s32", + "name": "vuzp1q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -117883,13 +323734,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s64", + "name": "vuzp1q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -117910,13 +323761,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_s8", + "name": "vuzp1q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -117937,13 +323788,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u16", + "name": "vuzp1q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -117964,13 +323815,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u32", + "name": "vuzp1q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -117991,13 +323842,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u64", + "name": "vuzp1q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -118018,13 +323869,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip1q_u8", + "name": "vuzp1q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -118045,13 +323896,13 @@ ], "instructions": [ [ - "ZIP1" + "UZP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_f16", + "name": "vuzp2_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ 
-118072,13 +323923,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_f32", + "name": "vuzp2_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -118099,13 +323950,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_p16", + "name": "vuzp2_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -118126,13 +323977,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_p8", + "name": "vuzp2_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -118153,13 +324004,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s16", + "name": "vuzp2_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -118180,13 +324031,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s32", + "name": "vuzp2_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -118207,13 +324058,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_s8", + "name": "vuzp2_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -118234,13 +324085,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u16", + "name": "vuzp2_u16", "arguments": [ "uint16x4_t a", "uint16x4_t b" @@ -118261,13 +324112,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u32", + "name": "vuzp2_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -118288,13 +324139,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2_u8", + "name": "vuzp2_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -118315,13 +324166,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_f16", + "name": "vuzp2q_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -118342,13 +324193,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": 
"vzip2q_f32", + "name": "vuzp2q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -118369,13 +324220,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_f64", + "name": "vuzp2q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -118396,13 +324247,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p16", + "name": "vuzp2q_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -118423,13 +324274,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p64", + "name": "vuzp2q_p64", "arguments": [ "poly64x2_t a", "poly64x2_t b" @@ -118450,13 +324301,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_p8", + "name": "vuzp2q_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -118477,13 +324328,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s16", + "name": "vuzp2q_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -118504,13 +324355,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s32", + "name": "vuzp2q_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -118531,13 +324382,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s64", + "name": "vuzp2q_s64", "arguments": [ "int64x2_t a", "int64x2_t b" @@ -118558,13 +324409,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_s8", + "name": "vuzp2q_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -118585,13 +324436,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u16", + "name": "vuzp2q_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -118612,13 +324463,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u32", + "name": "vuzp2q_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ 
-118639,13 +324490,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u64", + "name": "vuzp2q_u64", "arguments": [ "uint64x2_t a", "uint64x2_t b" @@ -118666,13 +324517,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip2q_u8", + "name": "vuzp2q_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -118693,13 +324544,13 @@ ], "instructions": [ [ - "ZIP2" + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_f16", + "name": "vuzp_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -118722,14 +324573,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_f32", + "name": "vuzp_f32", "arguments": [ "float32x2_t a", "float32x2_t b" @@ -118752,14 +324603,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_p16", + "name": "vuzp_p16", "arguments": [ "poly16x4_t a", "poly16x4_t b" @@ -118782,14 +324633,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_p8", + "name": "vuzp_p8", "arguments": [ "poly8x8_t a", "poly8x8_t b" @@ -118812,14 +324663,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s16", + "name": "vuzp_s16", "arguments": [ "int16x4_t a", "int16x4_t b" @@ -118842,14 +324693,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s32", + "name": "vuzp_s32", "arguments": [ "int32x2_t a", "int32x2_t b" @@ -118872,14 +324723,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_s8", + "name": "vuzp_s8", "arguments": [ "int8x8_t a", "int8x8_t b" @@ -118902,14 +324753,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u16", + "name": "vuzp_u16", "arguments": [ "uint16x4_t a", 
"uint16x4_t b" @@ -118932,14 +324783,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u32", + "name": "vuzp_u32", "arguments": [ "uint32x2_t a", "uint32x2_t b" @@ -118962,14 +324813,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzip_u8", + "name": "vuzp_u8", "arguments": [ "uint8x8_t a", "uint8x8_t b" @@ -118992,14 +324843,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_f16", + "name": "vuzpq_f16", "arguments": [ "float16x8_t a", "float16x8_t b" @@ -119022,14 +324873,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_f32", + "name": "vuzpq_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -119052,14 +324903,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_p16", + "name": "vuzpq_p16", "arguments": [ "poly16x8_t a", "poly16x8_t b" @@ -119082,14 +324933,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_p8", + "name": "vuzpq_p8", "arguments": [ "poly8x16_t a", "poly8x16_t b" @@ -119112,14 +324963,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s16", + "name": "vuzpq_s16", "arguments": [ "int16x8_t a", "int16x8_t b" @@ -119142,14 +324993,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s32", + "name": "vuzpq_s32", "arguments": [ "int32x4_t a", "int32x4_t b" @@ -119172,14 +325023,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_s8", + "name": "vuzpq_s8", "arguments": [ "int8x16_t a", "int8x16_t b" @@ -119202,14 +325053,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { 
"SIMD_ISA": "Neon", - "name": "vzipq_u16", + "name": "vuzpq_u16", "arguments": [ "uint16x8_t a", "uint16x8_t b" @@ -119232,14 +325083,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_u32", + "name": "vuzpq_u32", "arguments": [ "uint32x4_t a", "uint32x4_t b" @@ -119262,14 +325113,14 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vzipq_u8", + "name": "vuzpq_u8", "arguments": [ "uint8x16_t a", "uint8x16_t b" @@ -119292,14 +325143,46 @@ ], "instructions": [ [ - "ZIP1", - "ZIP2" + "UZP1", + "UZP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamin_f16", + "name": "vxarq_u64", + "arguments": [ + "uint64x2_t a", + "uint64x2_t b", + "const int imm6" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, + "imm6": { + "minimum": 0, + "maximum": 63 + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_f16", "arguments": [ "float16x4_t a", "float16x4_t b" @@ -119320,26 +325203,26 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f16", + "name": "vzip1_f32", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119347,19 +325230,100 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamin_f32", + "name": "vzip1_p16", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "poly16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + 
"register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_p8", + "arguments": [ + "poly8x8_t a", + "poly8x8_t b" + ], + "return_type": { + "value": "poly8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_s16", + "arguments": [ + "int16x4_t a", + "int16x4_t b" + ], + "return_type": { + "value": "int16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_s32", + "arguments": [ + "int32x2_t a", + "int32x2_t b" + ], + "return_type": { + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { @@ -119374,13 +325338,148 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f32", + "name": "vzip1_s8", + "arguments": [ + "int8x8_t a", + "int8x8_t b" + ], + "return_type": { + "value": "int8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u16", + "arguments": [ + "uint16x4_t a", + "uint16x4_t b" + ], + "return_type": { + "value": "uint16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u32", + "arguments": [ + "uint32x2_t a", + "uint32x2_t b" + ], + "return_type": { + "value": "uint32x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + } + }, + 
"Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1_u8", + "arguments": [ + "uint8x8_t a", + "uint8x8_t b" + ], + "return_type": { + "value": "uint8x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8B" + }, + "b": { + "register": "Vm.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_f16", + "arguments": [ + "float16x8_t a", + "float16x8_t b" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_f32", "arguments": [ "float32x4_t a", "float32x4_t b" @@ -119401,13 +325500,13 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vaminq_f64", + "name": "vzip1q_f64", "arguments": [ "float64x2_t a", "float64x2_t b" @@ -119428,26 +325527,26 @@ ], "instructions": [ [ - "FAMIN" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamax_f16", + "name": "vzip1q_p16", "arguments": [ - "float16x4_t a", - "float16x4_t b" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "float16x4_t" + "value": "poly16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.4H" + "register": "Vn.8H" }, "b": { - "register": "Vm.4H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119455,19 +325554,73 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f16", + "name": "vzip1q_p64", "arguments": [ - "float16x8_t a", - "float16x8_t b" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + 
}, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_p8", + "arguments": [ + "poly8x16_t a", + "poly8x16_t b" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_s16", + "arguments": [ + "int16x8_t a", + "int16x8_t b" + ], + "return_type": { + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { @@ -119482,26 +325635,26 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamax_f32", + "name": "vzip1q_s32", "arguments": [ - "float32x2_t a", - "float32x2_t b" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "float32x2_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vn.4S" }, "b": { - "register": "Vm.2S" + "register": "Vm.4S" } }, "Architectures": [ @@ -119509,19 +325662,100 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f32", + "name": "vzip1q_s64", "arguments": [ - "float32x4_t a", - "float32x4_t b" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "float32x4_t" + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_s8", + "arguments": [ + "int8x16_t a", + "int8x16_t b" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u16", + "arguments": [ + "uint16x8_t a", + "uint16x8_t b" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { 
+ "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u32", + "arguments": [ + "uint32x4_t a", + "uint32x4_t b" + ], + "return_type": { + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { @@ -119536,26 +325770,107 @@ ], "instructions": [ [ - "FAMAX" + "ZIP1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vamaxq_f64", + "name": "vzip1q_u64", "arguments": [ - "float64x2_t a", - "float64x2_t b" + "uint64x2_t a", + "uint64x2_t b" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip1q_u8", + "arguments": [ + "uint8x16_t a", + "uint8x16_t b" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.16B" + }, + "b": { + "register": "Vm.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip2_f16", + "arguments": [ + "float16x4_t a", + "float16x4_t b" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vzip2_f32", + "arguments": [ + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "float64x2_t" + "value": "float32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.2D" + "register": "Vn.2S" }, "b": { - "register": "Vm.2D" + "register": "Vm.2S" } }, "Architectures": [ @@ -119563,34 +325878,26 @@ ], "instructions": [ [ - "FAMAX" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_f16", + "name": "vzip2_p16", "arguments": [ - 
"float16x4_t a", - "uint8x8_t b", - "const int index" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119598,34 +325905,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_s16", + "name": "vzip2_p8", "arguments": [ - "int16x4_t a", - "uint8x8_t b", - "const int index" + "poly8x8_t a", + "poly8x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "poly8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119633,34 +325932,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_u16", + "name": "vzip2_s16", "arguments": [ - "uint16x4_t a", - "uint8x8_t b", - "const int index" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119668,34 +325959,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_p16", + "name": "vzip2_s32", "arguments": [ - "poly16x4_t a", - "uint8x8_t b", - "const int index" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 
- }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119703,34 +325986,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_f16", + "name": "vzip2_s8", "arguments": [ - "float16x4_t a", - "uint8x16_t b", - "const int index" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119738,34 +326013,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s16", + "name": "vzip2_u16", "arguments": [ - "int16x4_t a", - "uint8x16_t b", - "const int index" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "uint16x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4H" } }, "Architectures": [ @@ -119773,34 +326040,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_u16", + "name": "vzip2_u32", "arguments": [ - "uint16x4_t a", - "uint8x16_t b", - "const int index" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "uint32x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ @@ -119808,34 +326067,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p16", + "name": "vzip2_u8", "arguments": [ - "poly16x4_t a", - "uint8x16_t b", - "const int index" + 
"uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8B" } }, "Architectures": [ @@ -119843,17 +326094,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_f16", + "name": "vzip2q_f16", "arguments": [ "float16x8_t a", - "uint8x8_t b", - "const int index" + "float16x8_t b" ], "return_type": { "value": "float16x8_t" @@ -119863,14 +326113,7 @@ "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119878,34 +326121,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s16", + "name": "vzip2q_f32", "arguments": [ - "int16x8_t a", - "uint8x8_t b", - "const int index" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "float32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.4S" } }, "Architectures": [ @@ -119913,34 +326148,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u16", + "name": "vzip2q_f64", "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "const int index" + "float64x2_t a", + "float64x2_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "float64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -119948,17 +326175,16 @@ ], 
"instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p16", + "name": "vzip2q_p16", "arguments": [ "poly16x8_t a", - "uint8x8_t b", - "const int index" + "poly16x8_t b" ], "return_type": { "value": "poly16x8_t" @@ -119968,14 +326194,7 @@ "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -119983,34 +326202,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_f16", + "name": "vzip2q_p64", "arguments": [ - "float16x8_t a", - "uint8x16_t b", - "const int index" + "poly64x2_t a", + "poly64x2_t b" ], "return_type": { - "value": "float16x8_t" + "value": "poly64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2D" } }, "Architectures": [ @@ -120018,34 +326229,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s16", + "name": "vzip2q_p8", "arguments": [ - "int16x8_t a", - "uint8x16_t b", - "const int index" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "int16x8_t" + "value": "poly8x16_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.16B" } }, "Architectures": [ @@ -120053,34 +326256,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u16", + "name": "vzip2q_s16", "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "const int index" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "int16x8_t" }, "Arguments_Preparation": { "a": { "register": "Vn.8H" }, "b": { - 
"register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.8H" } }, "Architectures": [ @@ -120088,34 +326283,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_u8", + "name": "vzip2q_s32", "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "const int lane" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -120123,34 +326310,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u8", + "name": "vzip2q_s64", "arguments": [ - "uint8x16_t a", - "uint8x8_t b", - "const int lane" + "int64x2_t a", + "int64x2_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -120158,17 +326337,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_s8", + "name": "vzip2q_s8", "arguments": [ - "int8x8_t a", - "uint8x8_t b", - "const int lane" + "int8x16_t a", + "int8x16_t b" ], "return_type": { "value": "int8x16_t" @@ -120178,14 +326356,7 @@ "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.16B" } }, "Architectures": [ @@ -120193,34 +326364,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s8", + "name": "vzip2q_u16", "arguments": [ - "int8x16_t a", - "uint8x8_t b", - "const int lane" + 
"uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint16x8_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.8H" } }, "Architectures": [ @@ -120228,34 +326391,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_lane_p8", + "name": "vzip2q_u32", "arguments": [ - "poly8x8_t a", - "uint8x8_t b", - "const int lane" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint32x4_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4S" } }, "Architectures": [ @@ -120263,34 +326418,26 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p8", + "name": "vzip2q_u64", "arguments": [ - "poly8x16_t a", - "uint8x8_t b", - "const int lane" + "uint64x2_t a", + "uint64x2_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "uint64x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2D" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2D" } }, "Architectures": [ @@ -120298,17 +326445,16 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_u8", + "name": "vzip2q_u8", "arguments": [ - "uint8x8_t a", - "uint8x16_t b", - "const int index" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { "value": "uint8x16_t" @@ -120318,14 +326464,7 @@ "register": "Vn.16B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.16B" } }, "Architectures": [ @@ -120333,707 
+326472,607 @@ ], "instructions": [ [ - "LUTI2" + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u8", + "name": "vzip_f16", "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "const int index" + "float16x4_t a", + "float16x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "float16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s8", + "name": "vzip_f32", "arguments": [ - "int8x8_t a", - "uint8x16_t b", - "const int index" + "float32x2_t a", + "float32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "float32x2x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s8", + "name": "vzip_p16", "arguments": [ - "int8x16_t a", - "uint8x16_t b", - "const int index" + "poly16x4_t a", + "poly16x4_t b" ], "return_type": { - "value": "int8x16_t" + "value": "poly16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p8", + "name": "vzip_p8", "arguments": [ "poly8x8_t a", - "uint8x16_t b", - "const int index" + "poly8x8_t b" ], "return_type": { - "value": "poly8x16_t" + "value": 
"poly8x8x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.8B" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p8", + "name": "vzip_s16", "arguments": [ - "poly8x16_t a", - "uint8x16_t b", - "const int index" + "int16x4_t a", + "int16x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "int16x4x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.16B" + "register": "Vn.4H" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p16", + "name": "vzip_s32", "arguments": [ - "poly16x8_t a", - "uint8x16_t b", - "const int index" + "int32x2_t a", + "int32x2_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "int32x2x2_t" }, "Arguments_Preparation": { "a": { - "register": "Vn.8H" + "register": "Vn.2S" }, "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI2" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u8", + "name": "vzip_s8", "arguments": [ - "uint8x16_t vn", - "uint8x8_t vm", - "const int index" + "int8x8_t a", + "int8x8_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "int8x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.8B" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", 
+ "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u8", + "name": "vzip_u16", "arguments": [ - "uint8x16_t vn", - "uint8x16_t vm", - "const int index" + "uint16x4_t a", + "uint16x4_t b" ], "return_type": { - "value": "uint8x16_t" + "value": "uint16x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4H" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.4H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s8", + "name": "vzip_u32", "arguments": [ - "int8x16_t vn", - "uint8x8_t vm", - "const int index" + "uint32x2_t a", + "uint32x2_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint32x2x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.2S" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.2S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_s8", + "name": "vzip_u8", "arguments": [ - "int8x16_t vn", - "uint8x16_t vm", - "const int index" + "uint8x8_t a", + "uint8x8_t b" ], "return_type": { - "value": "int8x16_t" + "value": "uint8x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8B" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p8", + "name": "vzipq_f16", "arguments": [ - "poly8x16_t vn", - "uint8x8_t vm", - 
"const int index" + "float16x8_t a", + "float16x8_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p8", + "name": "vzipq_f32", "arguments": [ - "poly8x16_t vn", - "uint8x16_t vm", - "const int index" + "float32x4_t a", + "float32x4_t b" ], "return_type": { - "value": "poly8x16_t" + "value": "float32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.16B" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u16_x2", + "name": "vzipq_p16", "arguments": [ - "uint16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "poly16x8_t a", + "poly16x8_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "poly16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u16_x2", + "name": "vzipq_p8", "arguments": [ - "uint16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "poly8x16_t a", + "poly8x16_t b" ], "return_type": { - "value": "uint16x8_t" + "value": "poly8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": 
"Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s16_x2", + "name": "vzipq_s16", "arguments": [ - "int16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "int16x8_t a", + "int16x8_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_s16_x2", + "name": "vzipq_s32", "arguments": [ - "int16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "int32x4_t a", + "int32x4_t b" ], "return_type": { - "value": "int16x8_t" + "value": "int32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_f16_x2", + "name": "vzipq_s8", "arguments": [ - "float16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "int8x16_t a", + "int8x16_t b" ], "return_type": { - "value": "float16x8_t" + "value": "int8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } 
}, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_f16_x2", + "name": "vzipq_u16", "arguments": [ - "float16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "uint16x8_t a", + "uint16x8_t b" ], "return_type": { - "value": "float16x8_t" + "value": "uint16x8x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.8H" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.8H" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p16_x2", + "name": "vzipq_u32", "arguments": [ - "poly16x8x2_t vn", - "uint8x8_t vm", - "const int index" + "uint32x4_t a", + "uint32x4_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint32x4x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 + "a": { + "register": "Vn.4S" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.4S" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] }, { "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p16_x2", + "name": "vzipq_u8", "arguments": [ - "poly16x8x2_t vn", - "uint8x16_t vm", - "const int index" + "uint8x16_t a", + "uint8x16_t b" ], "return_type": { - "value": "poly16x8_t" + "value": "uint8x16x2_t" }, "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 + "a": { + "register": "Vn.16B" }, - "r": { - "register": "Vd.8H" + "b": { + "register": "Vm.16B" } }, "Architectures": [ + "v7", + "A32", "A64" ], "instructions": [ [ - "LUTI4" + "ZIP1", + "ZIP2" ] ] },